diff --git a/modules/dnns_easily_fooled/.gitignore b/modules/dnns_easily_fooled/.gitignore
new file mode 100644
index 000000000..d7ca9aff7
--- /dev/null
+++ b/modules/dnns_easily_fooled/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+*.pyc
+
+# Fortran module files
+*.mod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
diff --git a/modules/dnns_easily_fooled/Installation_Guide.pdf b/modules/dnns_easily_fooled/Installation_Guide.pdf
new file mode 100644
index 000000000..fce91a249
Binary files /dev/null and b/modules/dnns_easily_fooled/Installation_Guide.pdf differ
diff --git a/modules/dnns_easily_fooled/README.md b/modules/dnns_easily_fooled/README.md
new file mode 100644
index 000000000..303b83d5b
--- /dev/null
+++ b/modules/dnns_easily_fooled/README.md
@@ -0,0 +1,52 @@
+# Fooling Code
+
+This is the code base used to reproduce the "fooling" images in the paper:
+
+[Nguyen A](http://anhnguyen.me), [Yosinski J](http://yosinski.com/), [Clune J](http://jeffclune.com). ["Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images"](http://arxiv.org/abs/1412.1897). In Computer Vision and Pattern Recognition (CVPR '15), IEEE, 2015.
+
+**If you use this software in an academic article, please cite:**
+
+    @inproceedings{nguyen2015deep,
+      title={Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images},
+      author={Nguyen, Anh and Yosinski, Jason and Clune, Jeff},
+      booktitle={Computer Vision and Pattern Recognition (CVPR), 2015 IEEE Conference on},
+      year={2015},
+      organization={IEEE}
+    }
+
+For more information regarding the paper, please visit www.evolvingai.org/fooling
+
+## Requirements
+Installation requires two main software packages (both included in this code base):
+
+1. Caffe: http://caffe.berkeleyvision.org
+    * Libraries we installed to work with Caffe:
+        * Cuda 6.0
+        * Boost 1.52
+        * g++ 4.6
+2. Sferes: https://github.com/jbmouret/sferes2
+    * Libraries we installed to work with Sferes:
+        * OpenCV 2.4.10
+        * Boost 1.52
+        * g++ 4.9 (a C++ compiler compatible with the C++11 standard)
+
+Note: These are specific versions of the two frameworks, extended with the additional code needed to produce the images in the paper. They are not the same as their master branches.
+
+## Installation
+
+Please see the [Installation_Guide](https://github.com/Evolving-AI-Lab/fooling/wiki/Installation-Guide) for more details.
+
+## Usage
+
+* An MNIST experiment (Fig. 4, 5 in the paper) can be run directly on a local 4-core machine in a reasonable amount of time (~5 minutes or less for 200 generations).
+* An ImageNet experiment needs to be run in a cluster environment. It took us ~4 days on 128 cores to run 5000 generations and produce 1000 images (Fig. 8 in the paper).
+* [How to configure an experiment to test the evolutionary framework quickly](https://github.com/Evolving-AI-Lab/fooling/wiki/How-to-test-the-evolutionary-framework-quickly)
+* To reproduce the gradient ascent fooling images (Figures 13, S3, S4, S5, S6, and S7 from the paper), see the [documentation in the caffe/ascent directory](https://github.com/Evolving-AI-Lab/fooling/tree/ascent/caffe/ascent). You'll need to use the `ascent` branch instead of master, because the two required versions of Caffe are different. A short pycaffe sketch for sanity-checking a generated image follows this list.
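+
+The following is a minimal sketch (not part of the original experiment pipeline) for loading the network used in `caffe/ascent` and checking the confidence the DNN assigns to one generated image. It assumes pycaffe has been built, that `deploy_1_forcebackward.prototxt`, the `yos_140311__caffenet_iter_450000` model, and `synset_words.txt` have been downloaded as described in `caffe/ascent/README.md`, and that `fooling_image.jpg` is a hypothetical placeholder for an image produced by the experiments above. Exact preprocessing should mirror `load_net_mean()` in `caffe/ascent/find_fooling_image.py`.
+
+    #!/usr/bin/env python
+    # Minimal sketch: score one generated image with the bundled Caffe model.
+    # All paths below are assumptions; adjust them to your checkout.
+    import sys
+    import numpy as np
+
+    caffe_root = 'caffe/'                      # root of the bundled Caffe copy
+    sys.path.insert(0, caffe_root + 'python')  # make pycaffe importable
+    import caffe
+
+    # Same constructor arguments as load_net_mean() in find_fooling_image.py
+    net = caffe.Classifier(caffe_root + 'ascent/deploy_1_forcebackward.prototxt',
+                           caffe_root + 'ascent/yos_140311__caffenet_iter_450000',
+                           channel_swap=(2, 1, 0))
+    net.set_phase_test()
+    net.set_mode_cpu()
+
+    with open(caffe_root + 'data/ilsvrc12/synset_words.txt') as ff:
+        labels = [line.strip() for line in ff.readlines()]
+
+    # Crop the ImageNet mean to the 227x227 input size, as load_net_mean() does,
+    # and convert it from BGR (mean file order) to RGB.
+    inmean = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
+    offset = (256 - 227) / 2
+    mnirgb = inmean[:, offset:offset+227, offset:offset+227].transpose((1, 2, 0))[:, :, ::-1]
+
+    # 'fooling_image.jpg' is a hypothetical 227x227 image produced by the experiments.
+    img = caffe.io.load_image('fooling_image.jpg') * 255.0    # RGB, 0-255
+    X = net.preprocess('data', img - mnirgb)[np.newaxis, :]   # mean-subtracted, RGB -> BGR
+    net.forward_all(data=X)
+
+    probs = net.blobs['prob'].data.flatten()
+    top = probs.argmax()
+    print 'Top class: %s (p = %.4f)' % (labels[top], probs[top])
+
+The same pattern (preprocess, `forward_all`, then reading the `prob` blob) is what `find_fooling_image.py` uses internally at every gradient step.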
+ +## Updates + +* Our fork project [here](https://github.com/Evolving-AI-Lab/innovation-engine) has support for the **latest Caffe** and experiments to create *recognizable* images instead of unrecognizable. + +## License + +Please refer to the licenses of Sferes and Caffe projects. diff --git a/modules/dnns_easily_fooled/caffe/.gitignore b/modules/dnns_easily_fooled/caffe/.gitignore new file mode 100644 index 000000000..7d8dea01c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/.gitignore @@ -0,0 +1,65 @@ +## General + +# Compiled Object files +*.slo +*.lo +*.o +*.cuo +*.png +*.jpg +*.jpeg +# Compiled Dynamic libraries +*.so +*.dylib + +# Compiled Static libraries +*.lai +*.la +*.a + +# Compiled protocol buffers +*.pb.h +*.pb.cc +*_pb2.py + +# Compiled python +*.pyc + +# Compiled MATLAB +*.mex* + +# build, distribute, and bins +build +.build_debug/* +.build_release/* +distribute/* +*.testbin +*.bin +python/caffe/proto/ + +# Editor temporaries +*.swp +*~ + +# IPython notebook checkpoints +.ipynb_checkpoints + +## Caffe + +# User's build configuration +#Makefile.config + +# Data and examples are either +# 1. reference, and not casually committed +# 2. custom, and live on their own unless they're deliberated contributed +data/* +examples/* + +# Generated documentation +docs/_site +_site + +# Sublime Text settings +*.sublime-workspace +*.sublime-project + diff --git a/modules/dnns_easily_fooled/caffe/CONTRIBUTORS.md b/modules/dnns_easily_fooled/caffe/CONTRIBUTORS.md new file mode 100644 index 000000000..2de2a717e --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/CONTRIBUTORS.md @@ -0,0 +1,17 @@ +# Contributors + +Caffe is developed by a core set of BVLC members and the open-source community. + +We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! + +**For the detailed history of contributions** of a given file, try + + git blame file + +to see line-by-line credits and + + git log --follow file + +to see the change log even across renames and rewrites. + +Please refer to the [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements) on the Caffe site for further details. diff --git a/modules/dnns_easily_fooled/caffe/INSTALL.md b/modules/dnns_easily_fooled/caffe/INSTALL.md new file mode 100644 index 000000000..42fcf027e --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/INSTALL.md @@ -0,0 +1,7 @@ +# Installation + +See http://caffe.berkeleyvision.org/installation.html for the latest +installation instructions. + +Check the issue tracker in case you need help: +https://github.com/BVLC/caffe/issues diff --git a/modules/dnns_easily_fooled/caffe/LICENSE b/modules/dnns_easily_fooled/caffe/LICENSE new file mode 100644 index 000000000..bac9c99fd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2014, The Regents of the University of California (Regents) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/modules/dnns_easily_fooled/caffe/Makefile b/modules/dnns_easily_fooled/caffe/Makefile new file mode 100644 index 000000000..943165a46 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/Makefile @@ -0,0 +1,439 @@ +# The makefile for caffe. Pretty hacky. +PROJECT := caffe + +CONFIG_FILE := Makefile.config +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# The target static library and shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# HXX_SRCS are the header files +HXX_SRCS := $(shell find include/$(PROJECT) -name "*.hpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +GTEST_SRC := src/gtest/gtest-all.cpp +# TEST_HDRS are the test header files +TEST_HDRS := $(shell find src/$(PROJECT) -name "test_*.hpp") +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/$(PROJECT) \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_REPORT := $(BUILD_DIR)/cpp_lint.log +FAILED_LINT_REPORT := $(BUILD_DIR)/cpp_lint.error_log +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +# MAT$(PROJECT)_SRC is the matlab wrapper for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/$(PROJECT)/mat$(PROJECT).cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/$(PROJECT)/$(PROJECT).$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +HXX_SRCS += $(PROTO_GEN_HEADER) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. +CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/, ${CU_SRCS:.cu=.cuo}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJ_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT) +LAYER_BUILD_DIR := $(OBJ_BUILD_DIR)/layers +UTIL_BUILD_DIR := $(OBJ_BUILD_DIR)/util +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +GTEST_BUILD_DIR := $(dir $(GTEST_OBJ)) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +EXAMPLE_BUILD_DIR := $(BUILD_DIR)/examples +EXAMPLE_BUILD_DIRS := $(EXAMPLE_BUILD_DIR) +EXAMPLE_BUILD_DIRS += $(foreach obj,$(EXAMPLE_OBJS),$(dir $(obj))) +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# Put the test binaries in build/test for convenience. 
+TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_OBJS),$(basename $(notdir $(obj)))))) +TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include +CUDA_LIB_DIR := $(CUDA_DIR)/lib64 $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) +INCLUDE_DIRS += ./src ./include $(CUDA_INCLUDE_DIR) +LIBRARY_DIRS += $(CUDA_LIB_DIR) +LIBRARIES := cudart cublas curand \ + pthread \ + glog protobuf leveldb snappy \ + lmdb \ + boost_system \ + hdf5_hl hdf5 \ + opencv_core opencv_highgui opencv_imgproc +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall + +############################## +# Set build directories +############################## + +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort \ + $(BUILD_DIR) $(LIB_BUILD_DIR) $(OBJ_BUILD_DIR) \ + $(LAYER_BUILD_DIR) $(UTIL_BUILD_DIR) $(TOOL_BUILD_DIR) \ + $(TEST_BUILD_DIR) $(TEST_BIN_DIR) $(GTEST_BUILD_DIR) \ + $(EXAMPLE_BUILD_DIRS) \ + $(PROTO_BUILD_DIR) $(PROTO_BUILD_INCLUDE_DIR) $(PY_PROTO_BUILD_DIR) \ + $(DISTRIBUTE_SUBDIRS)) + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +ifeq ($(LINUX), 1) + CXX := /usr/bin/g++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ instead of libc++ for CUDA compatibility on 10.9 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(findstring 10.9, $(shell sw_vers -productVersion)),) + CXXFLAGS += -stdlib=libstdc++ + endif +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS := -DDEBUG -g -O0 +else + COMMON_FLAGS := -DNDEBUG -O2 +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR = /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LIBRARIES += cblas + LDFLAGS += -framework vecLib + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +# Complete build flags. +COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) +NVCCFLAGS := -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. 
+# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +############################## +# Define build targets +############################## +.PHONY: all test clean linecount lint tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles + +all: $(NAME) $(STATIC_NAME) tools examples + +linecount: clean + cloc --read-lang-def=$(PROJECT).cloc src/$(PROJECT)/ + +lint: $(LINT_REPORT) + +$(LINT_REPORT): $(NONGEN_CXX_SRCS) | $(BUILD_DIR) + @ (python ./scripts/cpp_lint.py $(NONGEN_CXX_SRCS) > $(LINT_REPORT) 2>&1 \ + && ($(RM) $(FAILED_LINT_REPORT); echo "No lint errors!")) || ( \ + mv $(LINT_REPORT) $(FAILED_LINT_REPORT); \ + grep -v "^Done processing " $(FAILED_LINT_REPORT); \ + echo "Found 1 or more lint errors; see log at $(FAILED_LINT_REPORT)"; \ + exit 1) + +test: $(TEST_ALL_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(STATIC_NAME) $(PY$(PROJECT)_SRC) + $(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + $(STATIC_NAME) $(CXXFLAGS) $(PYTHON_LDFLAGS) + @ echo + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + $(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) $(STATIC_NAME) \ + CXXFLAGS="\$$CXXFLAGS $(CXXFLAGS) $(WARNINGS)" \ + CXXLIBS="\$$CXXLIBS $(LDFLAGS)" -o $@ + @ echo + +runtest: $(TEST_ALL_BIN) + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(NAME): $(PROTO_OBJS) $(OBJS) | $(LIB_BUILD_DIR) + $(CXX) -shared -o $@ $(OBJS) $(CXXFLAGS) $(LDFLAGS) $(WARNINGS) + @ echo + +$(STATIC_NAME): $(PROTO_OBJS) $(OBJS) | $(LIB_BUILD_DIR) + ar rcs $@ $(PROTO_OBJS) $(OBJS) + @ echo + +$(TEST_BUILD_DIR)/%.o: src/$(PROJECT)/test/%.cpp $(HXX_SRCS) $(TEST_HDRS) \ + | $(TEST_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) $(STATIC_NAME) \ + | $(TEST_BIN_DIR) + $(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) $(STATIC_NAME) \ + -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS) + @ echo + +$(TEST_BIN_DIR)/%.testbin: $(TEST_BUILD_DIR)/%.o $(GTEST_OBJ) $(STATIC_NAME) \ + | $(TEST_BIN_DIR) + $(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) $(STATIC_NAME) \ + -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS) + @ echo + +$(TOOL_BINS): %.bin : %.o $(STATIC_NAME) + $(CXX) $< $(STATIC_NAME) -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS) + @ echo + +$(EXAMPLE_BINS): %.bin : %.o $(STATIC_NAME) + $(CXX) $< $(STATIC_NAME) -o $@ $(CXXFLAGS) $(LDFLAGS) $(WARNINGS) + @ echo + +$(LAYER_BUILD_DIR)/%.o: src/$(PROJECT)/layers/%.cpp $(HXX_SRCS) \ + | $(LAYER_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(UTIL_BUILD_DIR)/%.o: src/$(PROJECT)/util/%.cpp $(HXX_SRCS) | $(UTIL_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(GTEST_OBJ): $(GTEST_SRC) | $(GTEST_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(LAYER_BUILD_DIR)/%.cuo: src/$(PROJECT)/layers/%.cu $(HXX_SRCS) \ + | $(LAYER_BUILD_DIR) + $(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ + @ echo + +$(UTIL_BUILD_DIR)/%.cuo: src/$(PROJECT)/util/%.cu | $(UTIL_BUILD_DIR) + $(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ + @ echo + +$(TOOL_BUILD_DIR)/%.o: tools/%.cpp $(PROTO_GEN_HEADER) | $(TOOL_BUILD_DIR) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(EXAMPLE_BUILD_DIR)/%.o: examples/%.cpp $(PROTO_GEN_HEADER) \ + | $(EXAMPLE_BUILD_DIRS) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +$(BUILD_DIR)/src/$(PROJECT)/%.o: src/$(PROJECT)/%.cpp $(HXX_SRCS) + $(CXX) $< $(CXXFLAGS) -c -o $@ + @ echo + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + protoc --proto_path=src --cpp_out=$(BUILD_DIR)/src $< + @ echo + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + protoc --proto_path=src --python_out=python $< + @ echo + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py $(HXX_SRCS) | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(NAME) $(DISTRIBUTE_DIR)/lib + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python diff --git a/modules/dnns_easily_fooled/caffe/Makefile.config b/modules/dnns_easily_fooled/caffe/Makefile.config new file mode 100755 index 000000000..7a9137b50 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/Makefile.config @@ -0,0 +1,56 @@ +## Refer to http://caffe.berkeleyvision.org/installation.html +# Contributions simplifying and improving our build system are welcome! + +# CUDA directory contains bin/ and lib/ directories that we need. +CUDA_DIR := /usr/local/cuda + +# CUDA architecture setting: going with all of them. +CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 + +# BLAS choice: +# atlas for ATLAS (default) +# mkl for MKL +# open for OpenBlas +BLAS := atlas +# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. +# Leave commented to accept the defaults for your choice of BLAS +# (which should work)! +# BLAS_INCLUDE := /path/to/your/blas +BLAS_INCLUDE := /usr/include/atlas +# BLAS_LIB := /path/to/your/blas +BLAS_LIB := /usr/lib/atlas-base + +# This is required only if you will compile the matlab interface. +# MATLAB directory should contain the mex binary in /bin. +# MATLAB_DIR := /usr/local +# MATLAB_DIR := /Applications/MATLAB_R2012b.app + +# NOTE: this is required only if you will compile the python interface. +# We need to be able to find Python.h and numpy/arrayobject.h. +PYTHON_INCLUDE := /usr/local/include/python2.7 \ + /usr/include/python2.7 \ + /usr/local/lib/python2.7/dist-packages/numpy/core/include +# Anaconda Python distribution is quite popular. Include path: +# PYTHON_INCLUDE := $(HOME)/anaconda/include \ + # $(HOME)/anaconda/include/python2.7 \ + # $(HOME)/anaconda/lib/python2.7/site-packages/numpy/core/include + +# We need to be able to find libpythonX.X.so or .dylib. +PYTHON_LIB := /usr/local/lib +# PYTHON_LIB := $(HOME)/anaconda/lib + +# Whatever else you find you need goes here. +INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include +LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib + +BUILD_DIR := build +DISTRIBUTE_DIR := distribute + +# Uncomment for debugging. +# DEBUG := 1 + +# The ID of the GPU that 'make runtest' will use to run unit tests. 
+TEST_GPUID := 0 diff --git a/modules/dnns_easily_fooled/caffe/Makefile.config.example b/modules/dnns_easily_fooled/caffe/Makefile.config.example new file mode 100644 index 000000000..9754129be --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/Makefile.config.example @@ -0,0 +1,56 @@ +## Refer to http://caffe.berkeleyvision.org/installation.html +# Contributions simplifying and improving our build system are welcome! + +# CUDA directory contains bin/ and lib/ directories that we need. +CUDA_DIR := /usr/local/cuda +# On Ubuntu 14.04, if cuda tools are installed via +# "sudo apt-get install nvidia-cuda-toolkit" then use this instead: +# CUDA_DIR := /usr + +# CUDA architecture setting: going with all of them. +CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 + +# BLAS choice: +# atlas for ATLAS (default) +# mkl for MKL +# open for OpenBlas +BLAS := atlas +# Custom (MKL/ATLAS/OpenBLAS) include and lib directories. +# Leave commented to accept the defaults for your choice of BLAS +# (which should work)! +# BLAS_INCLUDE := /path/to/your/blas +# BLAS_LIB := /path/to/your/blas + +# This is required only if you will compile the matlab interface. +# MATLAB directory should contain the mex binary in /bin. +# MATLAB_DIR := /usr/local +# MATLAB_DIR := /Applications/MATLAB_R2012b.app + +# NOTE: this is required only if you will compile the python interface. +# We need to be able to find Python.h and numpy/arrayobject.h. +PYTHON_INCLUDE := /usr/local/include/python2.7 \ + /usr/local/lib/python2.7/dist-packages/numpy/core/include +# Anaconda Python distribution is quite popular. Include path: +# PYTHON_INCLUDE := $(HOME)/anaconda/include \ + # $(HOME)/anaconda/include/python2.7 \ + # $(HOME)/anaconda/lib/python2.7/site-packages/numpy/core/include + +# We need to be able to find libpythonX.X.so or .dylib. +PYTHON_LIB := /usr/local/lib +# PYTHON_LIB := $(HOME)/anaconda/lib + +# Whatever else you find you need goes here. +INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include +LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib + +BUILD_DIR := build +DISTRIBUTE_DIR := distribute + +# Uncomment for debugging. +# DEBUG := 1 + +# The ID of the GPU that 'make runtest' will use to run unit tests. +TEST_GPUID := 0 diff --git a/modules/dnns_easily_fooled/caffe/README.md b/modules/dnns_easily_fooled/caffe/README.md new file mode 100644 index 000000000..6b45624fe --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/README.md @@ -0,0 +1,115 @@ +[Caffe: Convolutional Architecture for Fast Feature Extraction](http://caffe.berkeleyvision.org) + +Created by [Yangqing Jia](http://daggerfs.com), UC Berkeley EECS department. +In active development by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu/)). + +## Introduction + +Caffe aims to provide computer vision scientists with a **clean, modifiable +implementation** of state-of-the-art deep learning algorithms. Network structure +is easily specified in separate config files, with no mess of hard-coded +parameters in the code. Python and Matlab wrappers are provided. + +At the same time, Caffe fits industry needs, with blazing fast C++/Cuda code for +GPU computation. Caffe is currently the fastest GPU CNN implementation publicly +available, and is able to process more than **40 million images per day** on a +single NVIDIA K40 GPU (or 20 million per day on a K20)\*. 
+
+Caffe also provides **seamless switching between CPU and GPU**, which allows one
+to train models with fast GPUs and then deploy them on non-GPU clusters with one
+line of code: `Caffe::set_mode(Caffe::CPU)`.
+
+Even in CPU mode, computing predictions on an image takes only 20 ms when images
+are processed in batch mode.
+
+* [Caffe introductory presentation](https://www.dropbox.com/s/10fx16yp5etb8dv/caffe-presentation.pdf)
+* [Installation instructions](http://caffe.berkeleyvision.org/installation.html)
+
+\* When measured with the [SuperVision](http://www.image-net.org/challenges/LSVRC/2012/supervision.pdf) model that won the ImageNet Large Scale Visual Recognition Challenge 2012.
+
+## License
+
+Caffe is BSD 2-Clause licensed (refer to the
+[LICENSE](http://caffe.berkeleyvision.org/license.html) for details).
+
+The pretrained models published by the BVLC, such as the
+[Caffe reference ImageNet model](https://www.dropbox.com/s/n3jups0gr7uj0dv/caffe_reference_imagenet_model)
+are licensed for academic research / non-commercial use only. However, Caffe is
+a full toolkit for model training, so start brewing your own Caffe model today!
+
+## Citing Caffe
+
+Please kindly cite Caffe in your publications if it helps your research:
+
+    @misc{Jia13caffe,
+      Author = {Yangqing Jia},
+      Title = { {Caffe}: An Open Source Convolutional Architecture for Fast Feature Embedding},
+      Year = {2013},
+      Howpublished = {\url{http://caffe.berkeleyvision.org/}}
+    }
+
+## Documentation
+
+Tutorials and general documentation are written in Markdown format in the `docs/` folder.
+While the format is quite easy to read directly, you may prefer to view the whole thing as a website.
+To do so, simply run `jekyll serve -s docs` and view the documentation website at `http://0.0.0.0:4000` (to get [jekyll](http://jekyllrb.com/), you must have ruby and do `gem install jekyll`).
+
+We strive to provide lots of usage examples, and to document all code in docstrings.
+We'd appreciate your contribution to this effort!
+
+## Development
+
+Caffe is developed with active participation of the community by the [Berkeley Vision and Learning Center](http://bvlc.eecs.berkeley.edu/).
+We welcome all contributions!
+
+### The release cycle
+
+- The `dev` branch is for new development, including community contributions. We aim to keep it in a functional state, but large changes may occur and things may get broken every now and then. Use this if you want the "bleeding edge".
+- The `master` branch is handled by BVLC, which will integrate changes from `dev` on a roughly monthly schedule, giving it a release tag. Use this if you want more stability.
+
+### Setting priorities
+
+- Make GitHub Issues for bugs, features you'd like to see, questions, etc.
+- Development work is guided by [milestones](https://github.com/BVLC/caffe/issues?milestone=1), which are sets of issues selected for concurrent release (integration from `dev` to `master`).
+- Please note that since the core developers are largely researchers, we may work on a feature in isolation from the open-source community for some time before releasing it, so as to claim honest academic contribution. We do release it as soon as a reasonable technical report may be written about the work, and we still aim to inform the community of ongoing development through Issues.
+
+### Contributing
+
+- Do new development in [feature branches](https://www.atlassian.com/git/workflows#!workflow-feature-branch) with descriptive names.
+- Bring your work up-to-date by [rebasing](http://git-scm.com/book/en/Git-Branching-Rebasing) onto the latest `dev`. (Polish your changes by [interactive rebase](https://help.github.com/articles/interactive-rebase), if you'd like.) +- [Pull request](https://help.github.com/articles/using-pull-requests) your contribution to BVLC/caffe's `dev` branch for discussion and review. + * PRs should live fast, die young, and leave a beautiful merge. Pull request sooner than later so that discussion can guide development. + * Code must be accompanied by documentation and tests at all times. + * Only fast-forward merges will be accepted. + +See our [development guidelines](http://caffe.berkeleyvision.org/development.html) for further details–the more closely these are followed, the sooner your work will be merged. + +#### [Shelhamer's](https://github.com/shelhamer) “life of a branch in four acts” + +Make the `feature` branch off of the latest `bvlc/dev` +``` +git checkout dev +git pull upstream dev +git checkout -b feature +# do your work, make commits +``` + +Prepare to merge by rebasing your branch on the latest `bvlc/dev` +``` +# make sure dev is fresh +git checkout dev +git pull upstream dev +# rebase your branch on the tip of dev +git checkout feature +git rebase dev +``` + +Push your branch to pull request it into `dev` +``` +git push origin feature +# ...make pull request to dev... +``` + +Now make a pull request! You can do this from the command line (`git pull-request -b dev`) if you install [hub](https://github.com/github/hub). + +The pull request of `feature` into `dev` will be a clean merge. Applause. diff --git a/modules/dnns_easily_fooled/caffe/ascent/README.md b/modules/dnns_easily_fooled/caffe/ascent/README.md new file mode 100644 index 000000000..6e44f14e9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/README.md @@ -0,0 +1,66 @@ +### General + +This directory contains the necessary code to reproduce the gradient +ascent images in the paper: Figures 13, S3, S4, S5, S6, and S7. This +is research code, and so it may contain paths and such that are +particular to our setup that will need to be changed for your own +setup. + +**Important note: this code requires the slightly modified version of caffe in this repository's [ascent](https://github.com/Evolving-AI-Lab/fooling/tree/ascent) branch. If you try running on master, you'll get an error about `backward_from_layer`.** See the below steps for using the correct branch. + +If you find any bugs, please submit a PR! + +If you have any trouble getting the code to work, please get in touch, and we'll help where we can. + + + +### Notes on running the gradient ascent code + + * The gist of the gradient ascent code (along with a lot of +experimental bookkeeping) is in the +[find_image function in find_fooling_image.py](https://github.com/Evolving-AI-Lab/fooling/blob/master/caffe/ascent/find_fooling_image.py#L68-L274). + * If you happen to be working in a +cluster environment that uses ```qsub```, you may find the shell scripts +useful; otherwise they probably won't help you much. 
+ * If you don't have a trained net around, you can download the trained model we used here: http://yosinski.cs.cornell.edu/yos_140311__caffenet_iter_450000 + * A file containing class labels is also used by the script and can be downloaded here: http://s.yosinski.com/synset_words.txt + + + +### Simple steps to generate one fooling image + +We'll walk through exact steps to generate a fooling image of a lion (class 291) using gradient ascent on the output unit for lion. + +First, clone the repo and checkout the ascent branch: + + [~] $ git clone git@github.com:Evolving-AI-Lab/fooling.git + [~] $ cd fooling + [~/fooling] $ git checkout ascent + [~/fooling] $ cd caffe + +Configure and compile caffe. See [installation instructions](http://caffe.berkeleyvision.org/installation.html). Make sure to compile the python bindings too: + + [~/fooling/caffe] $ make -j && make -j pycaffe + +Once Caffe is built, continue by fetching some auxiliary data (synsets.txt and a pre-trained model): + + [~/fooling/caffe] $ cd data/ilsvrc12 + [~/fooling/caffe/data/ilsvrc12] $ ./get_ilsvrc_aux.sh + [~/fooling/caffe/data/ilsvrc12] $ cd ../../ascent + [~/fooling/caffe/ascent] $ wget 'http://yosinski.cs.cornell.edu/yos_140311__caffenet_iter_450000' + +Now we're ready to run the optimization. To find a quick fooling image for the Lion class (idx 291) using only 3 gradient steps, run the following: + + [~/fooling/caffe/ascent] $ ./find_fooling_image.py --push_idx 291 --N 3 + ... + 0 Push idx: 291, val: 0.00209935 (n02129165 lion, king of beasts, Panthera leo) + Max idx: 815, val: 0.0114864 (n04275548 spider web, spider's web) + ... + 1 Push idx: 291, val: 0.00962483 (n02129165 lion, king of beasts, Panthera leo) + Max idx: 330, val: 0.0224016 (n02325366 wood rabbit, cottontail, cottontail rabbit) + ... + 2 Push idx: 291, val: 0.0518007 (n02129165 lion, king of beasts, Panthera leo) + Max idx: 291, val: 0.0518007 (n02129165 lion, king of beasts, Panthera leo) + ... 
+ Result: majority success + diff --git a/modules/dnns_easily_fooled/caffe/ascent/deploy_1_forcebackward.prototxt b/modules/dnns_easily_fooled/caffe/ascent/deploy_1_forcebackward.prototxt new file mode 100644 index 000000000..a14b969a3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/deploy_1_forcebackward.prototxt @@ -0,0 +1,213 @@ +name: "CaffeNet" +input: "data" +input_dim: 1 +input_dim: 3 +input_dim: 227 +input_dim: 227 +force_backward: true +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + } +} +layers { + name: "relu1" + type: RELU + bottom: "conv1" + top: "conv1" +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm1" + type: LRN + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + } +} +layers { + name: "relu2" + type: RELU + bottom: "conv2" + top: "conv2" +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm2" + type: LRN + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv3" + type: CONVOLUTION + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layers { + name: "relu3" + type: RELU + bottom: "conv3" + top: "conv3" +} +layers { + name: "conv4" + type: CONVOLUTION + bottom: "conv3" + top: "conv4" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu4" + type: RELU + bottom: "conv4" + top: "conv4" +} +layers { + name: "conv5" + type: CONVOLUTION + bottom: "conv4" + top: "conv5" + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu5" + type: RELU + bottom: "conv5" + top: "conv5" +} +layers { + name: "pool5" + type: POOLING + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "fc6" + type: INNER_PRODUCT + bottom: "pool5" + top: "fc6" + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu6" + type: RELU + bottom: "fc6" + top: "fc6" +} +layers { + name: "drop6" + type: DROPOUT + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc7" + type: INNER_PRODUCT + bottom: "fc6" + top: "fc7" + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu7" + type: RELU + bottom: "fc7" + top: "fc7" +} +layers { + name: "drop7" + type: DROPOUT + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc8" + type: INNER_PRODUCT + bottom: "fc7" + top: "fc8" + inner_product_param { + num_output: 1000 + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "fc8" + top: "prob" +} diff --git a/modules/dnns_easily_fooled/caffe/ascent/find_fooling_image.py b/modules/dnns_easily_fooled/caffe/ascent/find_fooling_image.py new file mode 100755 index 000000000..a238fd9ea --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/find_fooling_image.py @@ -0,0 +1,315 @@ +#! 
/usr/bin/env python + +import argparse +import pickle +import pylab +from pylab import * +from scipy.ndimage.filters import gaussian_filter +from collections import OrderedDict +import ipdb as pdb +plt.rcParams['image.interpolation'] = 'nearest' +plt.rcParams['image.cmap'] = 'gray' + +# Make sure that caffe is on the python path: +caffe_root = '../../' # this file is normally in {caffe_root}/ascent. If it's elsewhere, change this path. +import sys +sys.path.insert(0, caffe_root + 'python') +# If this next line fails, check the relevant paths. +import caffe + +from misc_helper import * + + + +def load_net_mean(): + # Pick which model to load, which image, etc. + + model_def_file = 'deploy_1_forcebackward.prototxt' + + # Can be downloaded from http://yosinski.cs.cornell.edu/yos_140311__caffenet_iter_450000 + pretrained_model = 'yos_140311__caffenet_iter_450000' + + # Can be downloaded from http://s.yosinski.com/synset_words.txt + with open('%s/data/ilsvrc12/synset_words.txt' % caffe_root) as ff: + labels = [line.strip() for line in ff.readlines()] + + # Load mean + inmean = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy') + + offset = (256-227)/2 + mn = inmean[:, offset:offset+227, offset:offset+227] + mni = mn.transpose((1,2,0)) + mnirgb = mni[:,:,::-1] # convert to rgb order + mn4d = mn[newaxis] + + net = caffe.Classifier(model_def_file, pretrained_model, + #mean=inmean, + channel_swap=(2,1,0), + #raw_scale=255.0, + #image_dims=(256, 256), + ) + + net.set_phase_test() + net.set_mode_cpu() + + return net, mnirgb, mn4d, labels + + + +def update_result(result, suffix, ii, X, X0): + result['iter_'+suffix] = ii + result['norm_'+suffix] = norm(X) + result['dist_'+suffix] = norm(X-X0) + result['std_'+suffix] = X.flatten().std() + result['X_'+suffix] = X.copy() + + + +def find_image(net, mnirgb, mn4d, labels, decay = .01, N = 300, rseed = 0, + push_layer = 'prob', push_idx = 278, start_at = 'mean_plus', prefix = 'junk', + lr_policy = 'progress', + lr_params = {'max_lr': 1e12, 'early_prog': .03, 'late_prog_mult': .1}, + blur_radius = 0, # 0 or at least .3 + blur_every = 1, + small_val_percentile = 0, + small_norm_percentile = 0, + px_benefit_percentile = 0, + px_abs_benefit_percentile = 0): + '''Find image for the given net using the specified start position, learning policies, etc.''' + + np.random.seed(rseed) + + #start_im = mnirgb[:] * 0 + if start_at == 'mean_plus': + start_im = np.random.normal(0, 1, mnirgb.shape) + elif start_at == 'randu': + start_im = uniform(0, 255, mnirgb.shape) - mnirgb + elif start_at == 'zero': + start_im = zeros(mnirgb.shape) + else: + raise Exception('Unknown start conditions: %s' % start_at) + + if lr_policy == 'progress': + assert 'max_lr' in lr_params + assert 'early_prog' in lr_params + assert 'late_prog_mult' in lr_params + elif lr_policy == 'constant': + assert 'lr' in lr_params + else: + raise Exception('Unknown lr_policy: %s' % lr_policy) + + try: + push_idx = tuple(push_idx) # tuple or list given + except TypeError: + push_idx = (push_idx, 0, 0) # int given + assert len(push_idx) == 3, 'provide push_idx in the form: int or (channel, x, y) tuple' + + #X0 = mn[newaxis,:] + #im255 = im01 * 255 - + + tmp = net.preprocess('data', start_im) # converts rgb -> bgr + X0 = tmp[newaxis,:] + + # What to change + #push_idx = 278 # kit fox + push_dir = 1.0 + class_unit = push_layer in ('fc8', 'prob') # Whether or not the unit being optimized corresponds to one of the 1000 classes + push_label = labels[push_idx[0]] if class_unit else 'None' + + X = 
X0.copy() + #figsize(20,8) + result = dict( + iter_maj = -1, + iter_99 = -1, + iter_999 = -1, + iter_9999 = -1, + iter_best = -1, + norm_maj = -1, + norm_99 = -1, + norm_999 = -1, + norm_9999 = -1, + norm_best = -1, + dist_maj = -1, + dist_99 = -1, + dist_999 = -1, + dist_9999 = -1, + dist_best = -1, + std_maj = -1, + std_99 = -1, + std_999 = -1, + std_9999 = -1, + std_best = -1, + act_best = -1, + X_maj = None, + X_99 = None, + X_999 = None, + X_9999 = None, + X_best = None, + decay = decay, N = N, push_idx = push_idx, push_dir = push_dir, push_layer = push_layer, + push_label = push_label, + lr_policy = lr_policy, lr_params = lr_params, + blur_radius = blur_radius, blur_every = blur_every, + small_val_percentile = small_val_percentile, small_norm_percentile = small_norm_percentile, + px_benefit_percentile = px_benefit_percentile, px_abs_benefit_percentile = px_abs_benefit_percentile, + ) + + print '\nParameters:' + for key in sorted(result.keys()): + print '%25s: %s' % (key, result[key]) + print + + for ii in range(N): + X = minimum(255.0, maximum(0.0, X + mn4d)) - mn4d # Crop all values to [0,255] + out = net.forward_all(data = X) + + acts = net.blobs[push_layer].data + + iimax = unravel_index(acts.argmax(), acts.shape)[1:] # chop off batch idx of 0 + obj = acts[0][push_idx] + if ii > 0 and lr_policy == 'progress': + print ' pred_prog: ', pred_prog, 'actual:', obj - old_obj + if class_unit: + print '%-4d' % ii, 'Push idx: %d, val: %g (%s)\n Max idx: %d, val: %g (%s)' % (push_idx[0], acts[0][push_idx], push_label, iimax[0], acts.max(), labels[iimax[0]]) + else: + print '%-4d' % ii, 'Push idx: %s, val: %g\n Max idx: %s, val: %g' % (push_idx, acts[0][push_idx], iimax, acts.max()) + print ' X: ', X.min(), X.max(), norm(X) + + if acts[0][push_idx] > result['act_best']: + update_result(result, 'best', ii, X, X0) + result['acts_best'] = acts[0][push_idx] + if iimax == push_idx and result['iter_maj'] == -1: + update_result(result, 'maj', ii, X, X0) + if acts[0][push_idx] > .99 and result['iter_99'] == -1: + update_result(result, '99', ii, X, X0) + if acts[0][push_idx] > .999 and result['iter_999'] == -1: + update_result(result, '999', ii, X, X0) + if acts[0][push_idx] > .9999 and result['iter_9999'] == -1: + update_result(result, '9999', ii, X, X0) + #break # Quit once confidence > .9999 + + diffs = net.blobs[push_layer].diff * 0 + diffs[0][push_idx] = push_dir + backout = net.backward_from_layer(push_layer, diffs) + + grad = backout['data'].copy() + print ' grad:', grad.min(), grad.max(), norm(grad) + if norm(grad) == 0: + print 'Grad 0, failed' + break + + # progress-based lr + if lr_policy == 'progress': + late_prog = lr_params['late_prog_mult'] * (1-obj) + desired_prog = min(lr_params['early_prog'], late_prog) + prog_lr = desired_prog / norm(grad)**2 + lr = min(lr_params['max_lr'], prog_lr) + print ' desired_prog:', desired_prog, 'prog_lr:', prog_lr, 'lr:', lr + pred_prog = lr * dot(grad.flatten(), grad.flatten()) + elif lr_policy == 'constant': + lr = lr_params['lr'] + else: + raise Exception('Unimlemented lr_policy') + + print ' change size:', abs(lr * grad).max() + old_obj = obj + + + if ii < N-1: + X += lr * grad + X *= (1 - decay) + + if blur_radius > 0: + if blur_radius < .3: + raise Exception('blur-radius of .3 or less works very poorly') + oldX = X.copy() + if ii % blur_every == 0: + for channel in range(3): + cimg = gaussian_filter(X[0,channel], blur_radius) + X[0,channel] = cimg + if small_val_percentile > 0: + small_entries = (abs(X) < percentile(abs(X), small_val_percentile)) + 
X = X - X*small_entries # set smallest 50% of X to zero + + if small_norm_percentile > 0: + pxnorms = norm(X, axis=1) + smallpx = pxnorms < percentile(pxnorms, small_norm_percentile) + smallpx3 = tile(smallpx[:,newaxis,:,:], (1,3,1,1)) + X = X - X*smallpx3 + + if px_benefit_percentile > 0: + pred_0_benefit = grad * -X + px_benefit = pred_0_benefit.sum(1) + smallben = px_benefit < percentile(px_benefit, px_benefit_percentile) + smallben3 = tile(smallben[:,newaxis,:,:], (1,3,1,1)) + X = X - X*smallben3 + + if px_abs_benefit_percentile > 0: + pred_0_benefit = grad * -X + px_benefit = pred_0_benefit.sum(1) + smallaben = abs(px_benefit) < percentile(abs(px_benefit), px_abs_benefit_percentile) + smallaben3 = tile(smallaben[:,newaxis,:,:], (1,3,1,1)) + X = X - X*smallaben3 + + + if class_unit: + if result['iter_maj'] != -1: + print 'Result: majority success' + else: + print 'Result: no convergence' + + for suffix in ('maj', '99', '999', '9999', 'best'): + if result['X_'+suffix] is not None: + asimg = net.deprocess('data', result['X_'+suffix]) + if suffix == 'best': + best_X = asimg.copy() + saveimagescc('%s_%s_X.jpg' % (prefix, suffix), asimg, 0) + saveimagesc('%s_%s_Xpm.jpg' % (prefix, suffix), asimg + mnirgb) + del result['X_'+suffix] + with open('%s_info.pkl' % prefix, 'w') as ff: + pickle.dump(result, ff) + with open('%s_info.txt' % prefix, 'w') as ff: + for key in sorted(result.keys()): + print >>ff, key, result[key] + + return best_X + + +def main(): + parser = argparse.ArgumentParser(description='Finds images that activate a network in various ways.') + parser.add_argument('--lr', type = float, default = .01) + parser.add_argument('--decay', type = float, default = .01) + parser.add_argument('--N', type = int, default = 300) + parser.add_argument('--rseed', type = int, default = 0) + parser.add_argument('--push_idx', type = int, default = -1) + parser.add_argument('--start_at', type = str, default = 'mean_plus') + parser.add_argument('--prefix', type = str, default = '%(push_idx)03d') + parser.add_argument('--multi_idx_start', type = int, default = -1) + parser.add_argument('--multi_idx_end', type = int, default = -1) + args = parser.parse_args() + + assert (args.push_idx == -1) != (args.multi_idx_start == -1 and args.multi_idx_end == -1), 'Use push_idx xor multi*' + assert (args.multi_idx_start == -1) == (args.multi_idx_end == -1), 'Use all multi* or none' + + net, mnirgb, mn4d, labels = load_net_mean() + + if args.push_idx != -1: + range_start = args.push_idx + range_end = args.push_idx + 1 + else: + range_start = args.multi_idx_start + range_end = args.multi_idx_end + for push_idx in range(range_start, range_end): + prefix_dict = vars(args) + prefix_dict['push_idx'] = push_idx + prefix_str = args.prefix % prefix_dict + print '\n\nFinding image' + print 'prefix_str', prefix_str + find_image(net, mnirgb, mn4d, labels, + lr = args.lr, decay = args.decay, N = args.N, rseed = args.rseed, + push_idx = args.push_idx, start_at = args.start_at, + prefix = prefix_str) + + + +if __name__ == '__main__': + main() diff --git a/modules/dnns_easily_fooled/caffe/ascent/hyperparam_search.py b/modules/dnns_easily_fooled/caffe/ascent/hyperparam_search.py new file mode 100755 index 000000000..e1edcb1d5 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/hyperparam_search.py @@ -0,0 +1,97 @@ +#! 
/usr/bin/env python + +from pylab import * +import os +import argparse +import ipdb as pdb + +from find_fooling_image import load_net_mean, find_image + + + +def rchoose(choices, prob=None): + if prob is None: + prob = ones(len(choices)) + prob = array(prob, dtype='float') + return np.random.choice(choices, p=prob/prob.sum()) + + + +def main(): + parser = argparse.ArgumentParser(description='Hyperparam search') + parser.add_argument('--result_prefix', type = str, default = './junk') + parser.add_argument('--hp_seed', type = int, default = 0) + parser.add_argument('--start_seed', type = int, default = 0) + parser.add_argument('--push_idx', type = int, default = 278) + parser.add_argument('--layer', type = str, default = 'prob', choices = ('fc8', 'prob')) + parser.add_argument('--startat', type = int, default = 0, choices = (0, 1)) + args = parser.parse_args() + + push_idx = args.push_idx + small_val_percentile = 0 + start_at = 'mean_plus' if args.startat == 0 else 'randu' + + if args.hp_seed == -1: + # Special hp_seed of -1 to do gradient descent without any regularization + decay = 0 + N = 500 + early_prog = .02 + late_prog_mult = .1 + blur_radius = 0 + blur_every = 1 + small_norm_percentile = 0 + px_benefit_percentile = 0 + px_abs_benefit_percentile = 0 + else: + np.random.seed(args.hp_seed) + + # Choose hyperparameter values given this seed + decay = rchoose((0, .0001, .001, .01, .1, .2, .3), + (4, 1, 1, 2, 1, 1, 1)) + N = rchoose((250, 500, 750, 1000, 1500)) + early_prog = rchoose( + (.02, .03, .04), + (1, 2, 1)) + late_prog_mult = rchoose((.02, .05, .1, .2)) + blur_radius = rchoose( + (0, .3, .4, .5, 1.0), + (10, 2, 1, 1, 1)) + blur_every = rchoose((1, 2, 3, 4)) + small_norm_percentile = rchoose( + (0, 10, 20, 30, 50, 80, 90), + (10, 10, 5, 2, 2, 2, 2)) + px_benefit_percentile = rchoose( + (0, 10, 20, 30, 50, 80, 90), + (20, 10, 5, 2, 2, 2, 2)) + px_abs_benefit_percentile = rchoose( + (0, 10, 20, 30, 50, 80, 90), + (10, 10, 5, 2, 2, 2, 2)) + + prefix = args.result_prefix + print 'prefix is', prefix + + net, mnirgb, mn4d, labels = load_net_mean() + + find_image(net, mnirgb, mn4d, labels, + decay = decay, + N = N, + rseed = args.start_seed, + push_idx = push_idx, + start_at = start_at, + prefix = prefix, + lr_policy = 'progress', + lr_params = {'max_lr': 1e7, + 'early_prog': early_prog, + 'late_prog_mult': late_prog_mult}, + blur_radius = blur_radius, + blur_every = blur_every, + small_val_percentile = small_val_percentile, + small_norm_percentile = small_norm_percentile, + px_benefit_percentile = px_benefit_percentile, + px_abs_benefit_percentile = px_abs_benefit_percentile, + ) + + + +if __name__ == '__main__': + main() diff --git a/modules/dnns_easily_fooled/caffe/ascent/misc_helper.py b/modules/dnns_easily_fooled/caffe/ascent/misc_helper.py new file mode 100644 index 000000000..9304e5536 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/misc_helper.py @@ -0,0 +1,112 @@ +#! 
/usr/bin/env python + +from pylab import * + + + +def figsize(width,height): + rcParams['figure.figsize'] = (width,height) + + + +def norm01(arr): + arr = arr.copy() + arr -= arr.min() + arr /= arr.max() + return arr + + + +def norm01c(arr, center): + '''Maps the center value to .5''' + arr = arr.copy() + arr -= center + arr /= max(2 * arr.max(), -2 * arr.min()) + arr += .5 + assert arr.min() >= 0 + assert arr.max() <= 1 + return arr + + + +def showimage(im, c01=False, bgr=False): + if c01: + # switch order from c,0,1 -> 0,1,c + im = im.transpose((1,2,0)) + if im.ndim == 3 and bgr: + # Change from BGR -> RGB + im = im[:, :, ::-1] + plt.imshow(im) + #axis('tight') + +def showimagesc(im, c01=False, bgr=False): + showimage(norm01(im), c01=c01, bgr=bgr) + + + +def saveimage(filename, im): + matplotlib.image.imsave(filename, im) + +def saveimagesc(filename, im): + saveimage(filename, norm01(im)) + +def saveimagescc(filename, im, center): + saveimage(filename, norm01c(im, center)) + + + +def tile_images(data, padsize=1, padval=0, c01=False, width=None): + '''take an array of shape (n, height, width) or (n, height, width, channels) + and visualize each (height, width) thing in a grid. If width = None, produce + a square image of size approx. sqrt(n) by sqrt(n), else calculate height.''' + data = data.copy() + if c01: + # Convert c01 -> 01c + data = data.transpose(0, 2, 3, 1) + data -= data.min() + data /= data.max() + + # force the number of filters to be square + if width == None: + width = int(np.ceil(np.sqrt(data.shape[0]))) + height = width + else: + assert isinstance(width, int) + height = int(np.ceil(float(data.shape[0]) / width)) + padding = ((0, width*height - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3) + data = np.pad(data, padding, mode='constant', constant_values=(padval, padval)) + + # tile the filters into an image + data = data.reshape((height, width) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1))) + data = data.reshape((height * data.shape[1], width * data.shape[3]) + data.shape[4:]) + data = data[0:-padsize, 0:-padsize] # remove excess padding + + return data + + + +def vis_square(data, padsize=1, padval=0, c01=False): + data = tile_images(data, padsize, padval, c01) + showimage(data, c01=False) + + + +def shownet(net): + '''Print some stats about a net and its activations''' + + print '%-41s%-31s%s' % ('', 'acts', 'act diffs') + print '%-45s%-31s%s' % ('', 'params', 'param diffs') + for k, v in net.blobs.items(): + if k in net.params: + params = net.params[k] + for pp, blob in enumerate(params): + if pp == 0: + print ' ', 'P: %-5s'%k, + else: + print ' ' * 11, + print '%-32s' % repr(blob.data.shape), + print '%-30s' % ('(%g, %g)' % (blob.data.min(), blob.data.max())), + print '(%g, %g)' % (blob.diff.min(), blob.diff.max()) + print '%-5s'%k, '%-34s' % repr(v.data.shape), + print '%-30s' % ('(%g, %g)' % (v.data.min(), v.data.max())), + print '(%g, %g)' % (v.diff.min(), v.diff.max()) diff --git a/modules/dnns_easily_fooled/caffe/ascent/results/.gitignore b/modules/dnns_easily_fooled/caffe/ascent/results/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/modules/dnns_easily_fooled/caffe/ascent/run.sh b/modules/dnns_easily_fooled/caffe/ascent/run.sh new file mode 100755 index 000000000..6a4428c98 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/ascent/run.sh @@ -0,0 +1,7 @@ +#! 
/bin/bash
+
+echo "just for reference"
+exit 0
+
+for idx in 0 1 2 3 4; do ./find_fooling_image.py --push_idx $idx --N 1500 --decay .03 --lr .001 --prefix 'result_idx3/idx_%(push_idx)03d_decay_%(decay).03f_lr_%(lr).03f_'; done
+for idx in 0 1 2 3 4; do ./find_fooling_image.py --push_idx $idx --N 1500 --decay .00 --lr .001 --prefix 'result_idx3/idx_%(push_idx)03d_decay_%(decay).03f_lr_%(lr).03f_'; done
diff --git a/modules/dnns_easily_fooled/caffe/ascent/run_chosen_supplementary.sh b/modules/dnns_easily_fooled/caffe/ascent/run_chosen_supplementary.sh
new file mode 100755
index 000000000..1c2e985cb
--- /dev/null
+++ b/modules/dnns_easily_fooled/caffe/ascent/run_chosen_supplementary.sh
@@ -0,0 +1,31 @@
+#! /bin/bash -x
+
+thisscript=$(readlink -f $0)
+scriptdir=`dirname $thisscript`
+
+
+for hp_seed in -1 169 188 360; do
+    #for push_idx in 278 543 251 99 906 805; do
+    for push_idx in 200 207 215 279 366 367 390 414 445 500 509 580 643 657 704 713 782 805 826 906; do
+        for start_seed in `seq 0 4`; do
+            startat=0
+
+            seed_dir=`printf "seed_%04d" $hp_seed`
+            result_dir="$scriptdir/results/supplementary_imgs/$seed_dir"
+            mkdir -p $result_dir
+            run_str=`printf 's%04d_idx%03d_sa%d_ss%02d' $hp_seed $push_idx $startat $start_seed`
+            jobname="job_${run_str}"
+
+            script="$result_dir/run_${run_str}.sh"
+            result_prefix="$result_dir/$run_str"
+
+            echo "#! /bin/bash" > $script
+            echo "cd $scriptdir" >> $script
+            echo "./hyperparam_search.py --result_prefix $result_prefix --hp_seed $hp_seed --push_idx $push_idx --start_seed $start_seed --startat $startat 2>&1" >> $script
+            chmod +x $script
+
+            qsub -N "$jobname" -A ACCOUNT_NAME -l nodes=1:ppn=2 -l walltime="1:00:00" -d "$result_dir" $script
+        done
+    done
+done
+
diff --git a/modules/dnns_easily_fooled/caffe/ascent/run_hyperparam_search.sh b/modules/dnns_easily_fooled/caffe/ascent/run_hyperparam_search.sh
new file mode 100755
index 000000000..f56b7910c
--- /dev/null
+++ b/modules/dnns_easily_fooled/caffe/ascent/run_hyperparam_search.sh
@@ -0,0 +1,32 @@
+#! /bin/bash -x
+
+thisscript=$(readlink -f $0)
+scriptdir=`dirname $thisscript`
+
+for hp_seed in `seq 101 399`; do
+#for hp_seed in 0; do
+    for push_idx in 278 543 251 99 906 805; do
+    #for push_idx in 278; do
+        startat=0
+        start_seed=0
+
+        seed_dir=`printf "seed_%04d" $hp_seed`
+        result_dir="$scriptdir/results/$seed_dir"
+        mkdir -p $result_dir
+        # note: use $startat here (the variable defined above); $start_at was undefined
+        run_str=`printf 's%04d_idx%03d_sa%d_ss%02d' $hp_seed $push_idx $startat $start_seed`
+        jobname="job_${run_str}"
+
+        script="$result_dir/run_${run_str}.sh"
+        result_prefix="$result_dir/$run_str"
+
+        echo "#! 
/bin/bash" > $script + echo "cd $scriptdir" >> $script + echo "./hyperparam_search.py --result_prefix $result_prefix --hp_seed $hp_seed --push_idx $push_idx --start_seed $start_seed --startat $startat 2>&1" >> $script + chmod +x $script + + qsub -N "$jobname" -A ACCOUNT_NAME -l nodes=1:ppn=2 -l walltime="1:00:00" -d "$result_dir" $script + + #sleep 1 + done +done + diff --git a/modules/dnns_easily_fooled/caffe/caffe.cloc b/modules/dnns_easily_fooled/caffe/caffe.cloc new file mode 100644 index 000000000..37a98bf52 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/caffe.cloc @@ -0,0 +1,53 @@ +Bourne Shell + filter remove_matches ^\s*# + filter remove_inline #.*$ + extension sh + script_exe sh +C + filter remove_matches ^\s*// + filter call_regexp_common C + filter remove_inline //.*$ + extension c + extension ec + extension pgc +C++ + filter remove_matches ^\s*// + filter remove_inline //.*$ + filter call_regexp_common C + extension C + extension cc + extension cpp + extension cxx + extension pcc +C/C++ Header + filter remove_matches ^\s*// + filter call_regexp_common C + filter remove_inline //.*$ + extension H + extension h + extension hh + extension hpp +Cuda + filter remove_matches ^\s*// + filter remove_inline //.*$ + filter call_regexp_common C + extension cu +Python + filter remove_matches ^\s*# + filter docstring_to_C + filter call_regexp_common C + filter remove_inline #.*$ + extension py +make + filter remove_matches ^\s*# + filter remove_inline #.*$ + extension Gnumakefile + extension Makefile + extension am + extension gnumakefile + extension makefile + filename Gnumakefile + filename Makefile + filename gnumakefile + filename makefile + script_exe make \ No newline at end of file diff --git a/modules/dnns_easily_fooled/caffe/docs/CNAME b/modules/dnns_easily_fooled/caffe/docs/CNAME new file mode 100644 index 000000000..eee1ae26d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/CNAME @@ -0,0 +1 @@ +caffe.berkeleyvision.org diff --git a/modules/dnns_easily_fooled/caffe/docs/README.md b/modules/dnns_easily_fooled/caffe/docs/README.md new file mode 100644 index 000000000..81e1566be --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/README.md @@ -0,0 +1,3 @@ +To generate stuff you can paste in an .md page from an IPython notebook, run + + ipython nbconvert --to markdown diff --git a/modules/dnns_easily_fooled/caffe/docs/_layouts/default.html b/modules/dnns_easily_fooled/caffe/docs/_layouts/default.html new file mode 100644 index 000000000..29c7a0810 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/_layouts/default.html @@ -0,0 +1,52 @@ + + + + + + Caffe + + + + + + + + + + +
[HTML layout markup not recoverable; the visible text of the page header and footer is: "Caffe", "Convolutional Architecture for Fast Feature Embedding", "Maintained by BVLC", "Created by Yangqing Jia", wrapping the Jekyll {{ content }} placeholder.]
+ + + diff --git a/modules/dnns_easily_fooled/caffe/docs/cifar10.md b/modules/dnns_easily_fooled/caffe/docs/cifar10.md new file mode 100644 index 000000000..dd85667d8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/cifar10.md @@ -0,0 +1,95 @@ +--- +layout: default +title: Caffe +--- + +Alex's CIFAR-10 tutorial, Caffe style +===================================== + +Alex Krizhevsky's [cuda-convnet](https://code.google.com/p/cuda-convnet/) details the model definitions, parameters, and training procedure for good performance on CIFAR-10. This example reproduces his results in Caffe. + +We will assume that you have Caffe successfully compiled. If not, please refer to the [Installation page](installation.html). In this tutorial, we will assume that your caffe installation is located at `CAFFE_ROOT`. + +We thank @chyojn for the pull request that defined the model schemas and solver configurations. + +*This example is a work-in-progress. It would be nice to further explain details of the network and training choices and benchmark the full training.* + +Prepare the Dataset +------------------- + +You will first need to download and convert the data format from the [CIFAR-10 website](http://www.cs.toronto.edu/~kriz/cifar.html). To do this, simply run the following commands: + + cd $CAFFE_ROOT/data/cifar10 + ./get_cifar10.sh + cd $CAFFE_ROOT/examples/cifar10 + ./create_cifar10.sh + +If it complains that `wget` or `gunzip` are not installed, you need to install them respectively. After running the script there should be the dataset, `./cifar10-leveldb`, and the data set image mean `./mean.binaryproto`. + +The Model +--------- + +The CIFAR-10 model is a CNN that composes layers of convolution, pooling, rectified linear unit (ReLU) nonlinearities, and local contrast normalization with a linear classifier on top of it all. We have defined the model in the `CAFFE_ROOT/examples/cifar10` directory's `cifar10_quick_train.prototxt`. + +Training and Testing the "Quick" Model +-------------------------------------- + +Training the model is simple after you have written the network definition protobuf and solver protobuf files. Simply run `train_quick.sh`, or the following command directly: + + cd $CAFFE_ROOT/examples/cifar10 + ./train_quick.sh + +`train_quick.sh` is a simple script, so have a look inside. `GLOG_logtostderr=1` is the google logging flag that prints all the logging messages directly to stderr. The main tool for training is `train_net.bin`, with the solver protobuf text file as its argument. + +When you run the code, you will see a lot of messages flying by like this: + + I0317 21:52:48.945710 2008298256 net.cpp:74] Creating Layer conv1 + I0317 21:52:48.945716 2008298256 net.cpp:84] conv1 <- data + I0317 21:52:48.945725 2008298256 net.cpp:110] conv1 -> conv1 + I0317 21:52:49.298691 2008298256 net.cpp:125] Top shape: 100 32 32 32 (3276800) + I0317 21:52:49.298719 2008298256 net.cpp:151] conv1 needs backward computation. + +These messages tell you the details about each layer, its connections and its output shape, which may be helpful in debugging. After the initialization, the training will start: + + I0317 21:52:49.309370 2008298256 net.cpp:166] Network initialization done. + I0317 21:52:49.309376 2008298256 net.cpp:167] Memory required for Data 23790808 + I0317 21:52:49.309422 2008298256 solver.cpp:36] Solver scaffolding done. 
+ I0317 21:52:49.309447 2008298256 solver.cpp:47] Solving CIFAR10_quick_train + +Based on the solver setting, we will print the training loss function every 100 iterations, and test the network every 500 iterations. You will see messages like this: + + I0317 21:53:12.179772 2008298256 solver.cpp:208] Iteration 100, lr = 0.001 + I0317 21:53:12.185698 2008298256 solver.cpp:65] Iteration 100, loss = 1.73643 + ... + I0317 21:54:41.150030 2008298256 solver.cpp:87] Iteration 500, Testing net + I0317 21:54:47.129461 2008298256 solver.cpp:114] Test score #0: 0.5504 + I0317 21:54:47.129500 2008298256 solver.cpp:114] Test score #1: 1.27805 + +For each training iteration, `lr` is the learning rate of that iteration, and `loss` is the training function. For the output of the testing phase, **score 0 is the accuracy**, and **score 1 is the testing loss function**. + +And after making yourself a cup of coffee, you are done! + + I0317 22:12:19.666914 2008298256 solver.cpp:87] Iteration 5000, Testing net + I0317 22:12:25.580330 2008298256 solver.cpp:114] Test score #0: 0.7533 + I0317 22:12:25.580379 2008298256 solver.cpp:114] Test score #1: 0.739837 + I0317 22:12:25.587262 2008298256 solver.cpp:130] Snapshotting to cifar10_quick_iter_5000 + I0317 22:12:25.590215 2008298256 solver.cpp:137] Snapshotting solver state to cifar10_quick_iter_5000.solverstate + I0317 22:12:25.592813 2008298256 solver.cpp:81] Optimization Done. + +Our model achieved ~75% test accuracy. The model parameters are stored in binary protobuf format in + + cifar10_quick_iter_5000 + +which is ready-to-deploy in CPU or GPU mode! Refer to the `CAFFE_ROOT/examples/cifar10/cifar10_quick.prototxt` for the deployment model definition that can be called on new data. + +Why train on a GPU? +------------------- + +CIFAR-10, while still small, has enough data to make GPU training attractive. + +To compare CPU vs. GPU training speed, simply change one line in all the `cifar*solver.prototxt`: + + # solver mode: CPU or GPU + solver_mode: CPU + +and you will be using CPU for training. diff --git a/modules/dnns_easily_fooled/caffe/docs/development.md b/modules/dnns_easily_fooled/caffe/docs/development.md new file mode 100644 index 000000000..26e4332cf --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/development.md @@ -0,0 +1,63 @@ +--- +layout: default +title: Caffe +--- + +Developing & Contributing +========================= + +Caffe is developed with active participation of the community by the [Berkeley Vision and Learning Center](http://bvlc.eecs.berkeley.edu/). +We welcome all contributions! + +The [contributing workflow](https://github.com/BVLC/caffe#development) is explained in the README. These guidelines cover development practices in Caffe. This is a work-in-progress. + +**Development Flow** + +- `master` is golden. +- `dev` is for new development: it is the branching point for features and the base of pull requests. + * The history of `dev` is not rewritten. + * Contributions are shepherded from `dev` to `master` by BVLC by merge. +- To err is human. Accidents are fixed by reverts. +- Releases are marked with tags on merge from `dev` to `master`. + +**Issues & Pull Request Protocol** + +0. Make issues for [bugs](https://github.com/BVLC/caffe/issues?labels=bug&page=1&state=open), tentative proposals, and [questions](https://github.com/BVLC/caffe/issues?labels=question&page=1&state=open). +1. Make PRs to signal development: + a. Make PRs *as soon as development begins*. 
Create a feature branch, make your initial commit, push, and PR to let everyone know you are working on it and let discussion guide development instead of review development after-the-fact. + b. When a proposal from the first step earns enough interest to warrant development, make a PR, and reference and close the old issue to direct the conversation to the PR. +2. When a PR is ready, comment to request a maintainer be assigned to review and merge to `dev`. + +A PR is only ready for review when the code is committed, documented, linted, and tested! + +**Documentation**: the documentation is bundled with Caffe in `docs/`. This includes the site you are reading now. Contributions should be documented both inline in code and through usage examples. New documentation is published by BVLC with each release and between releases as-needed. + +We'd appreciate your contribution to the documentation effort! + +**Testing**: run `make runtest` to check the project tests. New code requires new tests. Pull requests that fail tests will not be accepted. + +The `googletest` framework we use provides many additional options, which you can access by running the test binaries directly. One of the more useful options is `--gtest_filter`, which allows you to filter tests by name: + + # run all tests with CPU in the name + build/test/test_all.testbin --gtest_filter='*CPU*' + + # run all tests without GPU in the name (note the leading minus sign) + build/test/test_all.testbin --gtest_filter=-'*GPU*' + +To get a list of all options `googletest` provides, simply pass the `--help` flag: + + build/test/test_all.testbin --help + +**Style** + +- Follow [Google C++ style](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml) and [Google python style](http://google-styleguide.googlecode.com/svn/trunk/pyguide.html) + [PEP 8](http://legacy.python.org/dev/peps/pep-0008/). +- Wrap lines at 80 chars. +- Remember that “a foolish consistency is the hobgoblin of little minds,” so use your best judgement to write the clearest code for your particular case. + +**Lint**: run `make lint` to check C++ code. + +**Copyright**: assign copyright jointly to BVLC and contributors like so: + + // Copyright 2014 BVLC and contributors. + +The exact details of contributions are recorded by versioning and cited in our [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements). This method is impartial and always up-to-date. diff --git a/modules/dnns_easily_fooled/caffe/docs/feature_extraction.md b/modules/dnns_easily_fooled/caffe/docs/feature_extraction.md new file mode 100644 index 000000000..fa23e9c87 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/feature_extraction.md @@ -0,0 +1,71 @@ +--- +layout: default +title: Caffe +--- + +Extracting Features +=================== + +In this tutorial, we will extract features using a pre-trained model. +Follow instructions for [setting up caffe](installation.html) and for [getting](getting_pretrained_models.html) the pre-trained ImageNet model. +If you need detailed information about the tools below, please consult their source code, in which additional documentation is usually provided. + +Select data to run on +--------------------- + +We'll make a temporary folder to store things into. + + mkdir examples/_temp + +Generate a list of the files to process. +We're going to use the images that ship with caffe. 
+
+    find `pwd`/examples/images -type f -exec echo {} \; > examples/_temp/temp.txt
+
+The `ImageDataLayer` we'll use expects a label after each filename, so let's add a 0 to the end of each line:
+
+    sed "s/$/ 0/" examples/_temp/temp.txt > examples/_temp/file_list.txt
+
+Define the Feature Extraction Network Architecture
+--------------------------------------------------
+
+In practice, subtracting the mean image from a dataset significantly improves classification accuracies.
+Download the mean image of the ILSVRC dataset.
+
+    data/ilsvrc12/get_ilsvrc_aux.sh
+
+We will use `data/ilsvrc12/imagenet_mean.binaryproto` in the network definition prototxt.
+
+Let's copy and modify the network definition.
+We'll be using the `ImageDataLayer`, which will load and resize images for us.
+
+    cp examples/feature_extraction/imagenet_val.prototxt examples/_temp
+
+Edit `examples/_temp/imagenet_val.prototxt` to use the correct paths for your setup (replace `$CAFFE_DIR`).
+
+Extract Features
+----------------
+
+Now everything necessary is in place.
+
+    build/tools/extract_features.bin examples/imagenet/caffe_reference_imagenet_model examples/_temp/imagenet_val.prototxt fc7 examples/_temp/features 10
+
+The name of the feature blob that you extract is `fc7`, which represents the highest level feature of the reference model.
+We can use any other layer, as well, such as `conv5` or `pool3`.
+
+The last parameter above is the number of data mini-batches.
+
+The features are stored to the LevelDB `examples/_temp/features`, ready for access by some other code.
+
+If you encounter the error "Check failed: status.ok() Failed to open leveldb examples/_temp/features", it is because the directory examples/_temp/features was created the last time you ran the command. Remove it and run again.
+
+    rm -rf examples/_temp/features/
+
+If you'd like to use the Python wrapper for extracting features, check out the [layer visualization notebook](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/filter_visualization.ipynb).
+
+Clean Up
+--------
+
+Let's remove the temporary directory now.
+
+    rm -r examples/_temp
diff --git a/modules/dnns_easily_fooled/caffe/docs/getting_pretrained_models.md b/modules/dnns_easily_fooled/caffe/docs/getting_pretrained_models.md
new file mode 100644
index 000000000..a7b5e875c
--- /dev/null
+++ b/modules/dnns_easily_fooled/caffe/docs/getting_pretrained_models.md
@@ -0,0 +1,29 @@
+---
+layout: default
+---
+
+# Pre-trained models
+
+[BVLC](http://bvlc.eecs.berkeley.edu) aims to provide a variety of high quality pre-trained models.
+Note that unlike Caffe itself, these models are licensed for **academic research / non-commercial use only**.
+If you have any questions, please get in touch with us.
+
+This page will be updated as more models become available.
+
+### ImageNet
+
+**Caffe Reference ImageNet Model**: Our reference implementation of an ImageNet model trained on ILSVRC-2012 can be downloaded (232.6MB) by running `examples/imagenet/get_caffe_reference_imagenet_model.sh` from the Caffe root directory.
+
+- The bundled model is the iteration 310,000 snapshot.
+- The best validation performance during training was iteration 313,000 with
+  validation accuracy 57.412% and loss 1.82328.
+
+**AlexNet**: Our training of the Krizhevsky architecture, which differs from the paper's methodology by (1) not training with the relighting data-augmentation and (2) initializing non-zero biases to 0.1 instead of 1. (2) was found necessary for training, as initialization to 1 gave flat loss.
Download the model (243.9MB) by running `examples/imagenet/get_caffe_alexnet_model.sh` from the Caffe root directory. + +- The bundled model is the iteration 360,000 snapshot. +- The best validation performance during training was iteration 358,000 with + validation accuracy 57.258% and loss 1.83948. + +**R-CNN (ILSVRC13)**: The pure Caffe instantiation of the [R-CNN](https://github.com/rbgirshick/rcnn) model for ILSVRC13 detection. Download the model (230.8MB) by running `examples/imagenet/get_caffe_rcnn_imagenet_model.sh` from the Caffe root directory. This model was made by transplanting the R-CNN SVM classifiers into a `fc-rcnn` classification layer, provided here as an off-the-shelf Caffe detector. Try the [detection example](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/detection.ipynb) to see it in action. For the full details, refer to the R-CNN site. *N.B. For research purposes, make use of the official R-CNN package and not this example.* + +Additionally, you will probably eventually need some auxiliary data (mean image, synset list, etc.): run `data/ilsvrc12/get_ilsvrc_aux.sh` from the root directory to obtain it. diff --git a/modules/dnns_easily_fooled/caffe/docs/imagenet_training.md b/modules/dnns_easily_fooled/caffe/docs/imagenet_training.md new file mode 100644 index 000000000..f628f7956 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/imagenet_training.md @@ -0,0 +1,102 @@ +--- +layout: default +title: Caffe +--- + +Yangqing's Recipe on Brewing ImageNet +===================================== + + "All your braincells are belong to us." + - Caffeine + +We are going to describe a reference implementation for the approach first proposed by Krizhevsky, Sutskever, and Hinton in their [NIPS 2012 paper](http://books.nips.cc/papers/files/nips25/NIPS2012_0534.pdf). Since training the whole model takes some time and energy, we provide a model, trained in the same way as we describe here, to help fight global warming. If you would like to simply use the pretrained model, check out the [Pretrained ImageNet](getting_pretrained_models.html) page. *Note that the pretrained model is for academic research / non-commercial use only*. + +To clarify, by ImageNet we actually mean the ILSVRC12 challenge, but you can easily train on the whole of ImageNet as well, just with more disk space, and a little longer training time. + +(If you don't get the quote, visit [Yann LeCun's fun page](http://yann.lecun.com/ex/fun/). + +Data Preparation +---------------- + +We assume that you already have downloaded the ImageNet training data and validation data, and they are stored on your disk like: + + /path/to/imagenet/train/n01440764/n01440764_10026.JPEG + /path/to/imagenet/val/ILSVRC2012_val_00000001.JPEG + +You will first need to prepare some auxiliary data for training. This data can be downloaded by: + + cd $CAFFE_ROOT/data/ilsvrc12/ + ./get_ilsvrc_aux.sh + +The training and validation input are described in `train.txt` and `val.txt` as text listing all the files and their labels. Note that we use a different indexing for labels than the ILSVRC devkit: we sort the synset names in their ASCII order, and then label them from 0 to 999. See `synset_words.txt` for the synset/name mapping. + +You may want to resize the images to 256x256 in advance. By default, we do not explicitly do this because in a cluster environment, one may benefit from resizing images in a parallel fashion, using mapreduce. 
For example, Yangqing used his lightweight [mincepie](https://github.com/Yangqing/mincepie) package to do mapreduce on the Berkeley cluster. If you would like things to be rather simple and straightforward, you can also use shell commands, something like:
+
+    for name in /path/to/imagenet/val/*.JPEG; do
+        convert -resize 256x256\! $name $name
+    done
+
+Go to `$CAFFE_ROOT/examples/imagenet/` for the rest of this guide.
+
+Take a look at `create_imagenet.sh`. Set the paths to the train and val dirs as needed, and set "RESIZE=true" to resize all images to 256x256 if you haven't resized the images in advance. Now simply create the leveldbs with `./create_imagenet.sh`. Note that `imagenet_train_leveldb` and `imagenet_val_leveldb` should not exist before this execution. They will be created by the script. `GLOG_logtostderr=1` simply dumps more information for you to inspect, and you can safely ignore it.
+
+Compute Image Mean
+------------------
+
+The model requires us to subtract the image mean from each image, so we have to compute the mean. `tools/compute_image_mean.cpp` implements that - it is also a good example to familiarize yourself with how to manipulate the multiple components, such as protocol buffers, leveldbs, and logging, if you are not familiar with them. Anyway, the mean computation can be carried out as:
+
+    ./make_imagenet_mean.sh
+
+which will make `data/ilsvrc12/imagenet_mean.binaryproto`.
+
+Network Definition
+------------------
+
+The network definition follows strictly the one in Krizhevsky et al. You can find the detailed definition at `examples/imagenet/imagenet_train.prototxt`. Note the paths in the data layer - if you have not followed the exact paths in this guide you will need to change the following lines:
+
+    source: "ilvsrc12_train_leveldb"
+    mean_file: "../../data/ilsvrc12/imagenet_mean.binaryproto"
+
+to point to your own leveldb and image mean. Likewise, do the same for `examples/imagenet/imagenet_val.prototxt`.
+
+If you look carefully at `imagenet_train.prototxt` and `imagenet_val.prototxt`, you will notice that they are largely the same, with the only difference being the data layer sources, and the last layer: in training, we will be using a `softmax_loss` layer to compute the loss function and to initialize the backpropagation, while in validation we will be using an `accuracy` layer to inspect how well we do in terms of accuracy.
+
+We will also lay out a protocol buffer for running the solver. Let's make a few plans:
+* We will run in batches of 256, and run a total of 450,000 iterations (about 90 epochs).
+* For every 1,000 iterations, we test the learned net on the validation data.
+* We set the initial learning rate to 0.01, and decrease it every 100,000 iterations (about 20 epochs).
+* Information will be displayed every 20 iterations.
+* The network will be trained with momentum 0.9 and a weight decay of 0.0005.
+* For every 10,000 iterations, we will take a snapshot of the current status.
+
+Sound good? This is implemented in `examples/imagenet/imagenet_solver.prototxt`. Again, you will need to change the first two lines:
+
+    train_net: "imagenet_train.prototxt"
+    test_net: "imagenet_val.prototxt"
+
+to point to the actual paths if you have changed them.
+
+Training ImageNet
+-----------------
+
+Ready? Let's train.
+
+    ./train_imagenet.sh
+
+Sit back and enjoy! On my K20 machine, every 20 iterations take about 36 seconds to run, so effectively about 7 ms per image for the full forward-backward pass.
About 2.5 ms of this is on forward, and the rest is backward. If you are interested in dissecting the computation time, you can look at `examples/net_speed_benchmark.cpp`, but it was written purely for debugging purpose, so you may need to figure a few things out yourself. + +Resume Training? +---------------- + +We all experience times when the power goes out, or we feel like rewarding ourself a little by playing Battlefield (does someone still remember Quake?). Since we are snapshotting intermediate results during training, we will be able to resume from snapshots. This can be done as easy as: + + ./resume_training.sh + +where in the script `caffe_imagenet_train_1000.solverstate` is the solver state snapshot that stores all necessary information to recover the exact solver state (including the parameters, momentum history, etc). + +Parting Words +------------- + +Hope you liked this recipe! Many researchers have gone further since the ILSVRC 2012 challenge, changing the network architecture and/or finetuning the various parameters in the network. The recent ILSVRC 2013 challenge suggests that there are quite some room for improvement. **Caffe allows one to explore different network choices more easily, by simply writing different prototxt files** - isn't that exciting? + +And since now you have a trained network, check out how to use it: [Running Pretrained ImageNet](getting_pretrained_models.html). This time we will use Python, but if you have wrappers for other languages, please kindly send a pull request! diff --git a/modules/dnns_easily_fooled/caffe/docs/index.md b/modules/dnns_easily_fooled/caffe/docs/index.md new file mode 100644 index 000000000..4665425e6 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/index.md @@ -0,0 +1,79 @@ +--- +layout: default +--- +# Welcome to Caffe + +Caffe is a framework for convolutional neural network algorithms, developed with speed in mind. +It was created by [Yangqing Jia](http://daggerfs.com), and is in active development by the [Berkeley Vision and Learning Center](http://bvlc.eecs.berkeley.edu). + +Caffe is released under [the BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). + +Check out the [classification demo](http://demo.caffe.berkeleyvision.org/)! + +## Why Caffe? + +Caffe aims to provide computer vision scientists and practitioners with a **clean and modifiable implementation** of state-of-the-art deep learning algorithms. +For example, network structure is easily specified in separate config files, with no mess of hard-coded parameters in the code. + +At the same time, Caffe fits industry needs, with blazing fast C++/CUDA code for GPU computation. +Caffe is currently the fastest GPU CNN implementation publicly available, and is able to process more than **40 million images per day** with a single NVIDIA K40 or Titan GPU (or 20 million images per day on a K20 GPU)\*. That's 192 images per second during training and 500 images per second during test. + +Caffe also provides **seamless switching between CPU and GPU**, which allows one to train models with fast GPUs and then deploy them on non-GPU clusters with one line of code: `Caffe::set_mode(Caffe::CPU)`. +Even in CPU mode, computing predictions on an image takes only 20 ms when images are processed in batch mode. While in GPU mode, computing predictions on an image takes only 2 ms when images are processed in batch mode. 
+ +## Documentation + +* [Introductory slides](https://www.dropbox.com/s/10fx16yp5etb8dv/caffe-presentation.pdf): slides about the Caffe architecture, *updated 03/14*. +* [Installation](/installation.html): Instructions on installing Caffe (works on Ubuntu, Red Hat, OS X). +* [Pre-trained models](/getting_pretrained_models.html): BVLC provides some pre-trained models for academic / non-commercial use. +* [Development](/development.html): Guidelines for development and contributing to Caffe. + +### Examples + +* [Image Classification \[notebook\]][imagenet_classification]: classify images with the pretrained ImageNet model by the Python interface. +* [Detection \[notebook\]][detection]: run a pretrained model as a detector in Python. +* [Visualizing Features and Filters \[notebook\]][visualizing_filters]: extracting features and visualizing trained filters with an example image, viewed layer-by-layer. +* [Editing Model Parameters \[notebook\]][net_surgery]: how to do net surgery and manually change model parameters. +* [LeNet / MNIST Demo](/mnist.html): end-to-end training and testing of LeNet on MNIST. +* [CIFAR-10 Demo](/cifar10.html): training and testing on the CIFAR-10 data. +* [Training ImageNet](/imagenet_training.html): recipe for end-to-end training of an ImageNet classifier. +* [Feature extraction with C++](/feature_extraction.html): feature extraction using pre-trained model. + +[imagenet_classification]: http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/imagenet_classification.ipynb +[detection]: http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/detection.ipynb +[visualizing_filters]: http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/filter_visualization.ipynb +[net_surgery]: http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/net_surgery.ipynb + +## Citing Caffe + +Please kindly cite Caffe in your publications if it helps your research: + + @misc{Jia13caffe, + Author = {Yangqing Jia}, + Title = { {Caffe}: An Open Source Convolutional Architecture for Fast Feature Embedding}, + Year = {2013}, + Howpublished = {\url{http://caffe.berkeleyvision.org/} + } + +### Acknowledgements + +Yangqing would like to thank the NVIDIA Academic program for providing K20 GPUs, and [Oriol Vinyals](http://www1.icsi.berkeley.edu/~vinyals/) for various discussions along the journey. + +A core set of BVLC members have contributed lots of new functionality and fixes since the original release (alphabetical by first name): + +- [Eric Tzeng](https://github.com/erictzeng) +- [Evan Shelhamer](http://imaginarynumber.net/) +- [Jeff Donahue](http://jeffdonahue.com/) +- [Jon Long](https://github.com/longjon) +- [Dr. Ross Girshick](http://www.cs.berkeley.edu/~rbg/) +- [Sergey Karayev](http://sergeykarayev.com/) +- [Dr. Sergio Guadarrama](http://www.eecs.berkeley.edu/~sguada/) + +Additionally, the open-source community plays a large and growing role in Caffe's development. +Check out the Github [project pulse](https://github.com/BVLC/caffe/pulse) for recent activity, and the [contributors](https://github.com/BVLC/caffe/graphs/contributors) for an ordered list (by commit activity). +We sincerely appreciate your interest and contributions! +If you'd like to contribute, read [this](development.html). + +--- + +\*: When measured with the [SuperVision](http://www.image-net.org/challenges/LSVRC/2012/supervision.pdf) model that won the ImageNet Large Scale Visual Recognition Challenge 2012. 
See [performance and hardware configuration details](/performance_hardware.html). diff --git a/modules/dnns_easily_fooled/caffe/docs/installation.md b/modules/dnns_easily_fooled/caffe/docs/installation.md new file mode 100644 index 000000000..1d2c77ec3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/installation.md @@ -0,0 +1,182 @@ +--- +layout: default +title: Caffe +--- + +# Installation + +Prior to installing, it is best to read through this guide and take note of the details for your platform. +We have successfully compiled and run Caffe on Ubuntu 12.04, OS X 10.8, and OS X 10.9. + +- [Prerequisites](#prerequisites) +- [Compilation](#compilation) +- [Hardware questions](#hardware_questions) + +## Prerequisites + +Caffe depends on several software packages. + +* [CUDA](https://developer.nvidia.com/cuda-zone) (5.0, 5.5, or 6.0). +* [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) (provided via ATLAS, MKL, or OpenBLAS). +* [OpenCV](http://opencv.org/). +* [Boost](http://www.boost.org/) (we have only tested 1.55) +* `glog`, `gflags`, `protobuf`, `leveldb`, `snappy`, `hdf5` +* For the Python wrapper + * `Python`, `numpy (>= 1.7)`, boost-provided `boost.python` +* For the MATLAB wrapper + * MATLAB with the `mex` compiler. + +### CUDA and BLAS + +Caffe requires the CUDA `nvcc` compiler to compile its GPU code. +To install CUDA, go to the [NVIDIA CUDA website](https://developer.nvidia.com/cuda-downloads) and follow installation instructions there. **Note:** you can install the CUDA libraries without a CUDA card or driver, in order to build and run Caffe on a CPU-only machine. + +Caffe requires BLAS as the backend of its matrix and vector computations. +There are several implementations of this library. +The choice is yours: + +* [ATLAS](http://math-atlas.sourceforge.net/): free, open source, and so the default for Caffe. + + Ubuntu: `sudo apt-get install libatlas-base-dev` + + CentOS/RHEL: `sudo yum install libatlas-devel` + + OS X: already installed as the [Accelerate / vecLib Framework](https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man7/Accelerate.7.html). +* [Intel MKL](http://software.intel.com/en-us/intel-mkl): commercial and optimized for Intel CPUs, with a free trial and [student](http://software.intel.com/en-us/intel-education-offerings) licenses. + 1. Install MKL. + 2. Set `BLAS := mkl` in `Makefile.config` +* [OpenBLAS](http://www.openblas.net/): free and open source; this optimized and parallel BLAS could require more effort to install, although it might offer a speedup. + 1. Install OpenBLAS + 2. Set `BLAS := open` in `Makefile.config` + +### Python and/or Matlab wrappers (optional) + +Python: The main requirements are `numpy` and `boost.python` (provided by boost). `pandas` is useful too and needed for some examples. + +For **OS X**, we highly recommend using the [Anaconda](https://store.continuum.io/cshop/anaconda/) Python distribution, which provides most of the necessary packages, as well as the `hdf5` library dependency. +If you don't, please use Homebrew -- but beware of potential linking errors! + +Note that if you use the **Ubuntu** default python, you will need to `apt-get install` the `python-dev` package to have the python headers. You can install any remaining dependencies with + + pip install -r /path/to/caffe/python/requirements.txt + +MATLAB: install MATLAB, and make sure that its `mex` is in your `$PATH`. 
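Before moving on to the remaining packages, it can help to confirm that the optional wrapper prerequisites are actually visible from your shell. The following is only a convenience sketch based on the requirements above (numpy >= 1.7, the bundled `python/requirements.txt`, and `mex` on your `$PATH`); it assumes you run it from the Caffe root and have `pip` installed, so adjust it to your own setup:

    # quick sanity checks for the optional wrappers (run from the Caffe root)
    python -c "import numpy; print numpy.__version__"    # should report 1.7 or newer
    pip install -r python/requirements.txt                # remaining Python dependencies
    which mex                                             # MATLAB wrapper needs mex on $PATH

If any of these fail, revisit the corresponding step above before building the wrappers.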
+ +### The rest of the dependencies + +#### Linux + +On **Ubuntu**, the remaining dependencies can be installed with + + sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev + +And on **CentOS or RHEL**, you can install via yum using: + + sudo yum install protobuf-devel leveldb-devel snappy-devel opencv-devel boost-devel hdf5-devel + +The only exception being the google logging library, which does not exist in the Ubuntu 12.04 or CentOS/RHEL repositories. To install it, do: + + wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz + tar zxvf glog-0.3.3.tar.gz + ./configure + make && make install + +#### OS X + +On **OS X**, we highly recommend using the [homebrew](http://brew.sh/) package manager, and ideally starting from a clean install of the OS (or from a wiped `/usr/local`) to avoid conflicts. +In the following, we assume that you're using Anaconda Python and Homebrew. + +To install the OpenCV dependency, we'll need to provide an additional source for Homebrew: + + brew tap homebrew/science + +If using Anaconda Python, a modification is required to the OpenCV formula. +Do `brew edit opencv` and change the lines that look like the two lines below to exactly the two lines below. + + -DPYTHON_LIBRARY=#{py_prefix}/lib/libpython2.7.dylib + -DPYTHON_INCLUDE_DIR=#{py_prefix}/include/python2.7 + +**NOTE**: We find that everything compiles successfully if `$LD_LIBRARY_PATH` is not set at all, and `$DYLD_FALLBACK_LIBRARY_PATH` is set to to provide CUDA, Python, and other relevant libraries (e.g. `/usr/local/cuda/lib:$HOME/anaconda/lib:/usr/local/lib:/usr/lib`). +In other `ENV` settings, things may not work as expected. + +#### 10.8-specific Instructions + +Simply run the following: + + brew install --build-from-source --with-python boost + for x in snappy leveldb protobuf gflags glog szip homebrew/science/opencv; do brew install $x; done + +Building boost from source is needed to link against your local Python (exceptions might be raised during some OS X installs, but **ignore** these and continue). If you do not need the Python wrapper, simply doing `brew install boost` is fine. + +**Note** that the HDF5 dependency is provided by Anaconda Python in this case. +If you're not using Anaconda, include `hdf5` in the list above. + +#### 10.9-specific Instructions + +In OS X 10.9, clang++ is the default C++ compiler and uses `libc++` as the standard library. +However, NVIDIA CUDA (even version 6.0) currently links only with `libstdc++`. +This makes it necessary to change the compilation settings for each of the dependencies. + +We do this by modifying the homebrew formulae before installing any packages. +Make sure that homebrew doesn't install any software dependencies in the background; all packages must be linked to `libstdc++`. + +The prerequisite homebrew formulae are + + boost snappy leveldb protobuf gflags glog szip homebrew/science/opencv + +For each of these formulas, `brew edit FORMULA`, and add the ENV definitions as shown: + + def install + # ADD THE FOLLOWING: + ENV.append "CXXFLAGS", "-stdlib=libstdc++" + ENV.append "CFLAGS", "-stdlib=libstdc++" + ENV.append "LDFLAGS", "-stdlib=libstdc++ -lstdc++" + # The following is necessary because libtool likes to strip LDFLAGS: + ENV["CXX"] = "/usr/bin/clang++ -stdlib=libstdc++" + ... 
+ +To edit the formulae in turn, run + + for x in snappy leveldb protobuf gflags glog szip boost homebrew/science/opencv; do brew edit $x; done + +After this, run + + for x in snappy leveldb protobuf gflags glog szip homebrew/science/opencv; do brew uninstall $x; brew install --build-from-source --fresh -vd $x; done + brew install --build-from-source --with-python --fresh -vd boost + +**Note** that `brew install --build-from-source --fresh -vd boost` is fine if you do not need the Caffe Python wrapper. + +**Note** that the HDF5 dependency is provided by Anaconda Python in this case. +If you're not using Anaconda, include `hdf5` in the list above. + +#### Windows + +There is an unofficial Windows port of Caffe at [niuzhiheng/caffe:windows](https://github.com/niuzhiheng/caffe). Thanks [@niuzhiheng](https://github.com/niuzhiheng)! + +## Compilation + +Now that you have the prerequisites, edit your `Makefile.config` to change the paths for your setup. +The defaults should work, but uncomment the relevant lines if using Anaconda Python. + + cp Makefile.config.example Makefile.config + # Adjust Makefile.config (for example, if using Anaconda Python) + make all + make test + make runtest + +Note that if there is no GPU in your machine, building and running CPU-only works, but GPU tests will naturally fail. + +To compile the Python and MATLAB wrappers do `make pycaffe` and `make matcaffe` respectively. +Be sure to set your MATLAB and Python paths in `Makefile.config` first! +For Python support, you must add the compiled module to your `$PYTHONPATH` (as `/path/to/caffe/python` or the like). + +*Distribution*: run `make distribute` to create a `distribute` directory with all the Caffe headers, compiled libraries, binaries, etc. needed for distribution to other machines. + +*Speed*: for a faster build, compile in parallel by doing `make all -j8` where 8 is the number of parallel threads for compilation (a good choice for the number of threads is the number of cores in your machine). + +Now that you have installed Caffe, check out the [MNIST demo](mnist.html) and the pretrained [ImageNet example](imagenet.html). + +## Hardware Questions + +**Laboratory Tested Hardware**: Berkeley Vision runs Caffe with K40s, K20s, and Titans including models at ImageNet/ILSVRC scale. We also run on GTX series cards and GPU-equipped MacBook Pros. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. + +**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Your mileage may vary. + +Refer to the project's issue tracker for [hardware/compatibility](https://github.com/BVLC/caffe/issues?labels=hardware%2Fcompatibility&page=1&state=open). diff --git a/modules/dnns_easily_fooled/caffe/docs/javascripts/scale.fix.js b/modules/dnns_easily_fooled/caffe/docs/javascripts/scale.fix.js new file mode 100644 index 000000000..08716c006 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/javascripts/scale.fix.js @@ -0,0 +1,20 @@ +fixScale = function(doc) { + + var addEvent = 'addEventListener', + type = 'gesturestart', + qsa = 'querySelectorAll', + scales = [1, 1], + meta = qsa in doc ? 
doc[qsa]('meta[name=viewport]') : []; + + function fix() { + meta.content = 'width=device-width,minimum-scale=' + scales[0] + ',maximum-scale=' + scales[1]; + doc.removeEventListener(type, fix, true); + } + + if ((meta = meta[meta.length - 1]) && addEvent in doc) { + fix(); + scales = [.25, 1.6]; + doc[addEvent](type, fix, true); + } + +}; \ No newline at end of file diff --git a/modules/dnns_easily_fooled/caffe/docs/mnist.md b/modules/dnns_easily_fooled/caffe/docs/mnist.md new file mode 100644 index 000000000..9a9b46a4c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/mnist.md @@ -0,0 +1,91 @@ +--- +layout: default +title: Caffe +--- + +Training MNIST with Caffe +================ + +We will assume that you have caffe successfully compiled. If not, please refer to the [Installation page](installation.html). In this tutorial, we will assume that your caffe installation is located at `CAFFE_ROOT`. + +Prepare Datasets +---------------- + +You will first need to download and convert the data format from the MNIST website. To do this, simply run the following commands: + + cd $CAFFE_ROOT/data/mnist + ./get_mnist.sh + cd $CAFFE_ROOT/examples/mnist + ./create_mnist.sh + +If it complains that `wget` or `gunzip` are not installed, you need to install them respectively. After running the script there should be two datasets, `mnist-train-leveldb`, and `mnist-test-leveldb`. + +LeNet: the MNIST Classification Model +------------------------------------- +Before we actually run the training program, let's explain what will happen. We will use the [LeNet](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf) network, which is known to work well on digit classification tasks. We will use a slightly different version from the original LeNet implementation, replacing the sigmoid activations with Rectified Linear Unit (ReLU) activations for the neurons. + +The design of LeNet contains the essence of CNNs that are still used in larger models such as the ones in ImageNet. In general, it consists of a convolutional layer followed by a pooling layer, another convolution layer followed by a pooling layer, and then two fully connected layers similar to the conventional multilayer perceptrons. We have defined the layers in `CAFFE_ROOT/data/lenet.prototxt`. + +If you would like to read about step-by-step instruction on how the protobuf definitions are written, see [MNIST: Define the Network](mnist_prototxt.html) and [MNIST: Define the Solver](mnist_solver_prototxt.html)?. + +Training and Testing the Model +------------------------------ + +Training the model is simple after you have written the network definition protobuf and solver protobuf files. Simply run `train_mnist.sh`, or the following command directly: + + cd $CAFFE_ROOT/examples/mnist + ./train_lenet.sh + +`train_lenet.sh` is a simple script, but here are a few explanations: `GLOG_logtostderr=1` is the google logging flag that prints all the logging messages directly to stderr. The main tool for training is `train_net.bin`, with the solver protobuf text file as its argument. + +When you run the code, you will see a lot of messages flying by like this: + + I1203 net.cpp:66] Creating Layer conv1 + I1203 net.cpp:76] conv1 <- data + I1203 net.cpp:101] conv1 -> conv1 + I1203 net.cpp:116] Top shape: 20 24 24 + I1203 net.cpp:127] conv1 needs backward computation. + +These messages tell you the details about each layer, its connections and its output shape, which may be helpful in debugging. 
After the initialization, the training will start: + + I1203 net.cpp:142] Network initialization done. + I1203 solver.cpp:36] Solver scaffolding done. + I1203 solver.cpp:44] Solving LeNet + +Based on the solver setting, we will print the training loss function every 100 iterations, and test the network every 1000 iterations. You will see messages like this: + + I1203 solver.cpp:204] Iteration 100, lr = 0.00992565 + I1203 solver.cpp:66] Iteration 100, loss = 0.26044 + ... + I1203 solver.cpp:84] Testing net + I1203 solver.cpp:111] Test score #0: 0.9785 + I1203 solver.cpp:111] Test score #1: 0.0606671 + +For each training iteration, `lr` is the learning rate of that iteration, and `loss` is the training function. For the output of the testing phase, score 0 is the accuracy, and score 1 is the testing loss function. + +And after a few minutes, you are done! + + I1203 solver.cpp:84] Testing net + I1203 solver.cpp:111] Test score #0: 0.9897 + I1203 solver.cpp:111] Test score #1: 0.0324599 + I1203 solver.cpp:126] Snapshotting to lenet_iter_10000 + I1203 solver.cpp:133] Snapshotting solver state to lenet_iter_10000.solverstate + I1203 solver.cpp:78] Optimization Done. + +The final model, stored as a binary protobuf file, is stored at + + lenet_iter_10000 + +which you can deploy as a trained model in your application, if you are training on a real-world application dataset. + +Um... How about GPU training? +----------------------------- + +You just did! All the training was carried out on the GPU. In fact, if you would like to do training on CPU, you can simply change one line in `lenet_solver.prototxt`: + + # solver mode: CPU or GPU + solver_mode: CPU + +and you will be using CPU for training. Isn't that easy? + +MNIST is a small dataset, so training with GPU does not really introduce too much benefit due to communication overheads. On larger datasets with more complex models, such as ImageNet, the computation speed difference will be more significant. diff --git a/modules/dnns_easily_fooled/caffe/docs/mnist_prototxt.md b/modules/dnns_easily_fooled/caffe/docs/mnist_prototxt.md new file mode 100644 index 000000000..aaff2b009 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/mnist_prototxt.md @@ -0,0 +1,153 @@ +--- +layout: default +title: Caffe +--- + +Define the MNIST Network +========================= + +This page explains the prototxt file `lenet_train.prototxt` used in the MNIST demo. We assume that you are familiar with [Google Protobuf](https://developers.google.com/protocol-buffers/docs/overview), and assume that you have read the protobuf definitions used by Caffe, which can be found at [src/caffe/proto/caffe.proto](https://github.com/Yangqing/caffe/blob/master/src/caffe/proto/caffe.proto). + +Specifically, we will write a `caffe::NetParameter` (or in python, `caffe.proto.caffe_pb2.NetParameter`) protubuf. We will start by giving the network a name: + + name: "LeNet" + +Writing the Data Layer +---------------------- +Currently, we will read the MNIST data from the leveldb we created earlier in the demo. This is defined by a data layer: + + layers { + name: "mnist" + type: DATA + data_param { + source: "mnist-train-leveldb" + batch_size: 64 + scale: 0.00390625 + } + top: "data" + top: "label" + } + +Specifically, this layer has name `mnist`, type `data`, and it reads the data from the given leveldb source. We will use a batch size of 64, and scale the incoming pixels so that they are in the range \[0,1\). Why 0.00390625? It is 1 divided by 256. 
And finally, this layer produces two blobs, one is the `data` blob, and one is the `label` blob. + +Writing the Convolution Layer +-------------------------------------------- +Let's define the first convolution layer: + + layers { + name: "conv1" + type: CONVOLUTION + blobs_lr: 1. + blobs_lr: 2. + convolution_param { + num_output: 20 + kernelsize: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + bottom: "data" + top: "conv1" + } + +This layer takes the `data` blob (it is provided by the data layer), and produces the `conv1` layer. It produces outputs of 20 channels, with the convolutional kernel size 5 and carried out with stride 1. + +The fillers allow us to randomly initialize the value of the weights and bias. For the weight filler, we will use the `xavier` algorithm that automatically determines the scale of initialization based on the number of input and output neurons. For the bias filler, we will simply initialize it as constant, with the default filling value 0. + +`blobs_lr` are the learning rate adjustments for the layer's learnable parameters. In this case, we will set the weight learning rate to be the same as the learning rate given by the solver during runtime, and the bias learning rate to be twice as large as that - this usually leads to better convergence rates. + +Writing the Pooling Layer +------------------------- +Phew. Pooling layers are actually much easier to define: + + layers { + name: "pool1" + type: POOLING + pooling_param { + kernel_size: 2 + stride: 2 + pool: MAX + } + bottom: "conv1" + top: "pool1" + } + +This says we will perform max pooling with a pool kernel size 2 and a stride of 2 (so no overlapping between neighboring pooling regions). + +Similarly, you can write up the second convolution and pooling layers. Check `data/lenet.prototxt` for details. + +Writing the Fully Connected Layer +---------------------------------- +Writing a fully connected layer is also simple: + + layers { + name: "ip1" + type: INNER_PRODUCT + blobs_lr: 1. + blobs_lr: 2. + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + bottom: "pool2" + top: "ip1" + } + +This defines a fully connected layer (for some legacy reason, Caffe calls it an `innerproduct` layer) with 500 outputs. All other lines look familiar, right? + +Writing the ReLU Layer +---------------------- +A ReLU Layer is also simple: + + layers { + name: "relu1" + type: RELU + bottom: "ip1" + top: "ip1" + } + +Since ReLU is an element-wise operation, we can do *in-place* operations to save some memory. This is achieved by simply giving the same name to the bottom and top blobs. Of course, do NOT use duplicated blob names for other layer types! + +After the ReLU layer, we will write another innerproduct layer: + + layers { + name: "ip2" + type: INNER_PRODUCT + blobs_lr: 1. + blobs_lr: 2. + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } + bottom: "ip1" + top: "ip2" + } + +Writing the Loss Layer +------------------------- +Finally, we will write the loss! + + layers { + name: "loss" + type: SOFTMAX_LOSS + bottom: "ip2" + bottom: "label" + } + +The `softmax_loss` layer implements both the softmax and the multinomial logistic loss (that saves time and improves numerical stability). It takes two blobs, the first one being the prediction and the second one being the `label` provided by the data layer (remember it?). 
It does not produce any outputs - all it does is to compute the loss function value, report it when backpropagation starts, and initiates the gradient with respect to `ip2`. This is where all magic starts. + +Now that we have demonstrated how to write the MNIST layer definition prototxt, maybe check out [how we write a solver prototxt](mnist_solver_prototxt.html)? diff --git a/modules/dnns_easily_fooled/caffe/docs/mnist_solver_prototxt.md b/modules/dnns_easily_fooled/caffe/docs/mnist_solver_prototxt.md new file mode 100644 index 000000000..aa3578f11 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/mnist_solver_prototxt.md @@ -0,0 +1,37 @@ +--- +layout: default +title: Caffe +--- + +Define the MNIST Solver +======================= + +The page is under construction. For now, check out the comments in the solver prototxt file, which explains each line in the prototxt: + + # The training protocol buffer definition + train_net: "lenet_train.prototxt" + # The testing protocol buffer definition + test_net: "lenet_test.prototxt" + # test_iter specifies how many forward passes the test should carry out. + # In the case of MNIST, we have test batch size 100 and 100 test iterations, + # covering the full 10,000 testing images. + test_iter: 100 + # Carry out testing every 500 training iterations. + test_interval: 500 + # The base learning rate, momentum and the weight decay of the network. + base_lr: 0.01 + momentum: 0.9 + weight_decay: 0.0005 + # The learning rate policy + lr_policy: "inv" + gamma: 0.0001 + power: 0.75 + # Display every 100 iterations + display: 100 + # The maximum number of iterations + max_iter: 10000 + # snapshot intermediate results + snapshot: 5000 + snapshot_prefix: "lenet" + # solver mode: 0 for CPU and 1 for GPU + solver_mode: 1 diff --git a/modules/dnns_easily_fooled/caffe/docs/performance_hardware.md b/modules/dnns_easily_fooled/caffe/docs/performance_hardware.md new file mode 100644 index 000000000..7a08b8a53 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/performance_hardware.md @@ -0,0 +1,57 @@ +--- +layout: default +title: Caffe +--- + +# Performance and Hardware Configuration + +To measure performance on different NVIDIA GPUs we use the Caffe reference ImageNet model. + +For training, each time point is 20 iterations/minibatches of 256 images for 5,120 images total. For testing, a 50,000 image validation set is classified. + +**Acknowledgements**: BVLC members are very grateful to NVIDIA for providing several GPUs to conduct this research. + +## NVIDIA K40 + +Performance is best with ECC off and boost clock enabled. While ECC makes a negligible difference in speed, disabling it frees ~1 GB of GPU memory. + +Best settings with ECC off and maximum clock speed: + +* Training is 26.5 secs / 20 iterations (5,120 images) +* Testing is 100 secs / validation set (50,000 images) + +Other settings: + +* ECC on, max speed: training 26.7 secs / 20 iterations, test 101 secs / validation set +* ECC on, default speed: training 31 secs / 20 iterations, test 117 secs / validation set +* ECC off, default speed: training 31 secs / 20 iterations, test 118 secs / validation set + +### K40 configuration tips + +For maximum K40 performance, turn off ECC and boost the clock speed (at your own risk). + +To turn off ECC, do + + sudo nvidia-smi -i 0 --ecc-config=0 # repeat with -i x for each GPU ID + +then reboot. 
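After the reboot, it is worth double-checking that ECC is actually off before benchmarking; the query below is only a convenience check that reads `nvidia-smi`'s standard report and is not a required step:

    # confirm the current and pending ECC mode (repeat with -i x for each GPU ID)
    nvidia-smi -i 0 -q -d ECC

A similar query with `-d CLOCK` can be used later to verify the clock speeds applied by the commands below.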
+ +Set the "persistence" mode of the GPU settings by + + sudo nvidia-smi -pm 1 + +and then set the clock speed with + + sudo nvidia-smi -i 0 -ac 3004,875 # repeat with -i x for each GPU ID + +but note that this configuration resets across driver reloading / rebooting. Include these commands in a boot script to intialize these settings. For a simple fix, add these commands to `/etc/rc.local` (on Ubuntu). + +## NVIDIA Titan + +Training: 26.26 secs / 20 iterations (5,120 images). +Testing: 100 secs / validation set (50,000 images). + +## NVIDIA K20 + +Training: 36.0 secs / 20 iterations (5,120 images). +Testing: 133 secs / validation set (50,000 images) diff --git a/modules/dnns_easily_fooled/caffe/docs/stylesheets/pygment_trac.css b/modules/dnns_easily_fooled/caffe/docs/stylesheets/pygment_trac.css new file mode 100644 index 000000000..c6a6452d2 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/stylesheets/pygment_trac.css @@ -0,0 +1,69 @@ +.highlight { background: #ffffff; } +.highlight .c { color: #999988; font-style: italic } /* Comment */ +.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ +.highlight .k { font-weight: bold } /* Keyword */ +.highlight .o { font-weight: bold } /* Operator */ +.highlight .cm { color: #999988; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #999999; font-weight: bold } /* Comment.Preproc */ +.highlight .c1 { color: #999988; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ +.highlight .gd .x { color: #000000; background-color: #ffaaaa } /* Generic.Deleted.Specific */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #aa0000 } /* Generic.Error */ +.highlight .gh { color: #999999 } /* Generic.Heading */ +.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ +.highlight .gi .x { color: #000000; background-color: #aaffaa } /* Generic.Inserted.Specific */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #555555 } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold; } /* Generic.Subheading */ +.highlight .gt { color: #aa0000 } /* Generic.Traceback */ +.highlight .kc { font-weight: bold } /* Keyword.Constant */ +.highlight .kd { font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #445588; font-weight: bold } /* Keyword.Type */ +.highlight .m { color: #009999 } /* Literal.Number */ +.highlight .s { color: #d14 } /* Literal.String */ +.highlight .na { color: #008080 } /* Name.Attribute */ +.highlight .nb { color: #0086B3 } /* Name.Builtin */ +.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */ +.highlight .no { color: #008080 } /* Name.Constant */ +.highlight .ni { color: #800080 } /* Name.Entity */ +.highlight .ne { color: #990000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #990000; font-weight: bold } /* Name.Function */ +.highlight .nn { color: #555555 } /* Name.Namespace */ +.highlight .nt { color: #000080 } /* Name.Tag */ +.highlight .nv { color: #008080 } /* Name.Variable */ +.highlight .ow { font-weight: bold } /* Operator.Word 
*/ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mf { color: #009999 } /* Literal.Number.Float */ +.highlight .mh { color: #009999 } /* Literal.Number.Hex */ +.highlight .mi { color: #009999 } /* Literal.Number.Integer */ +.highlight .mo { color: #009999 } /* Literal.Number.Oct */ +.highlight .sb { color: #d14 } /* Literal.String.Backtick */ +.highlight .sc { color: #d14 } /* Literal.String.Char */ +.highlight .sd { color: #d14 } /* Literal.String.Doc */ +.highlight .s2 { color: #d14 } /* Literal.String.Double */ +.highlight .se { color: #d14 } /* Literal.String.Escape */ +.highlight .sh { color: #d14 } /* Literal.String.Heredoc */ +.highlight .si { color: #d14 } /* Literal.String.Interpol */ +.highlight .sx { color: #d14 } /* Literal.String.Other */ +.highlight .sr { color: #009926 } /* Literal.String.Regex */ +.highlight .s1 { color: #d14 } /* Literal.String.Single */ +.highlight .ss { color: #990073 } /* Literal.String.Symbol */ +.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */ +.highlight .vc { color: #008080 } /* Name.Variable.Class */ +.highlight .vg { color: #008080 } /* Name.Variable.Global */ +.highlight .vi { color: #008080 } /* Name.Variable.Instance */ +.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */ + +.type-csharp .highlight .k { color: #0000FF } +.type-csharp .highlight .kt { color: #0000FF } +.type-csharp .highlight .nf { color: #000000; font-weight: normal } +.type-csharp .highlight .nc { color: #2B91AF } +.type-csharp .highlight .nn { color: #000000 } +.type-csharp .highlight .s { color: #A31515 } +.type-csharp .highlight .sc { color: #A31515 } diff --git a/modules/dnns_easily_fooled/caffe/docs/stylesheets/reset.css b/modules/dnns_easily_fooled/caffe/docs/stylesheets/reset.css new file mode 100644 index 000000000..6020b26fb --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/stylesheets/reset.css @@ -0,0 +1,21 @@ +/* MeyerWeb Reset */ + +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, +figure, figcaption, footer, header, hgroup, +menu, nav, output, ruby, section, summary, +time, mark, audio, video { + margin: 0; + padding: 0; + border: 0; + font: inherit; + vertical-align: baseline; +} diff --git a/modules/dnns_easily_fooled/caffe/docs/stylesheets/styles.css b/modules/dnns_easily_fooled/caffe/docs/stylesheets/styles.css new file mode 100644 index 000000000..b91cec6f8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/docs/stylesheets/styles.css @@ -0,0 +1,393 @@ +body { + padding:10px 50px 0 0; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-weight: 300; + font-size: 14px; + color: #232323; + background-color: #FBFAF7; + margin: 0; + line-height: 1.8em; + -webkit-font-smoothing: antialiased; + +} + +h1, h2, h3, h4, h5, h6 { + color:#232323; + margin:36px 0 10px; +} + +p, ul, ol, table, dl { + margin:0 0 22px; +} + +h1, h2, h3 { + font-family: Times, serif; + font-weight: 300; + line-height:1.3; + font-weight: normal; + display: block; + border-bottom: 1px solid #ccc; + padding-bottom: 5px; +} + +h1 { + font-size: 30px; +} + +h2 { + font-size: 24px; +} + +h3 { + font-size: 18px; +} + +h4, h5, h6 { + font-family: Times, serif; + font-weight: 700; +} + +a { + 
color:#C30000; + text-decoration:none; +} + +a:hover { + text-decoration: underline; +} + +a small { + font-size: 12px; +} + +em { + font-style: italic; +} + +strong { + font-weight:700; +} + +ul { + list-style: inside; + padding-left: 25px; +} + +ol { + list-style: decimal inside; + padding-left: 20px; +} + +blockquote { + margin: 0; + padding: 0 0 0 20px; + font-style: italic; +} + +dl, dt, dd, dl p { + font-color: #444; +} + +dl dt { + font-weight: bold; +} + +dl dd { + padding-left: 20px; + font-style: italic; +} + +dl p { + padding-left: 20px; + font-style: italic; +} + +hr { + border:0; + background:#ccc; + height:1px; + margin:0 0 24px; +} + +/* Images */ + +img { + position: relative; + margin: 0 auto; + max-width: 650px; + padding: 5px; + margin: 10px 0 32px 0; + border: 1px solid #ccc; +} + +p img { + display: inline; + margin: 0; + padding: 0; + vertical-align: middle; + text-align: center; + border: none; +} + +/* Code blocks */ + +code, pre { + font-family: monospace; + color:#000; + font-size:12px; + line-height: 14px; +} + +pre { + padding: 6px 12px; + background: #FDFEFB; + border-radius:4px; + border:1px solid #D7D8C8; + overflow: auto; + white-space: pre-wrap; + margin-bottom: 16px; +} + + +/* Tables */ + +table { + width:100%; +} + +table { + border: 1px solid #ccc; + margin-bottom: 32px; + text-align: left; + } + +th { + font-family: 'Arvo', Helvetica, Arial, sans-serif; + font-size: 18px; + font-weight: normal; + padding: 10px; + background: #232323; + color: #FDFEFB; + } + +td { + padding: 10px; + background: #ccc; + } + + +/* Wrapper */ +.wrapper { + width:960px; +} + + +/* Header */ + +header { + background-color: #171717; + color: #FDFDFB; + width:170px; + float:left; + position:fixed; + border: 1px solid #000; + -webkit-border-top-right-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -moz-border-radius-topright: 4px; + -moz-border-radius-bottomright: 4px; + border-top-right-radius: 4px; + border-bottom-right-radius: 4px; + padding: 12px 25px 22px 50px; + margin: 24px 25px 0 0; + -webkit-font-smoothing: antialiased; +} + +p.header { + font-size: 16px; +} + +h1.header { + /*font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;*/ + font-size: 30px; + font-weight: 300; + line-height: 1.3em; + border-bottom: none; + margin-top: 0; +} + + +h1.header, a.header, a.name, header a{ + color: #fff; +} + +a.header { + text-decoration: underline; +} + +a.name { + white-space: nowrap; +} + +header ul { + list-style:none; + padding:0; +} + +header li { + list-style-type: none; + width:132px; + height:15px; + margin-bottom: 12px; + line-height: 1em; + padding: 6px 6px 6px 7px; + + background: #AF0011; + background: -moz-linear-gradient(top, #AF0011 0%, #820011 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#f8f8f8), color-stop(100%,#dddddd)); + background: -webkit-linear-gradient(top, #AF0011 0%,#820011 100%); + background: -o-linear-gradient(top, #AF0011 0%,#820011 100%); + background: -ms-linear-gradient(top, #AF0011 0%,#820011 100%); + background: linear-gradient(top, #AF0011 0%,#820011 100%); + + border-radius:4px; + border:1px solid #0D0D0D; + + -webkit-box-shadow: inset 0px 1px 1px 0 rgba(233,2,38, 1); + box-shadow: inset 0px 1px 1px 0 rgba(233,2,38, 1); + +} + +header li:hover { + background: #C3001D; + background: -moz-linear-gradient(top, #C3001D 0%, #950119 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#f8f8f8), color-stop(100%,#dddddd)); + background: -webkit-linear-gradient(top, 
#C3001D 0%,#950119 100%); + background: -o-linear-gradient(top, #C3001D 0%,#950119 100%); + background: -ms-linear-gradient(top, #C3001D 0%,#950119 100%); + background: linear-gradient(top, #C3001D 0%,#950119 100%); +} + +a.buttons { + -webkit-font-smoothing: antialiased; + background: url(../images/arrow-down.png) no-repeat; + font-weight: normal; + text-shadow: rgba(0, 0, 0, 0.4) 0 -1px 0; + padding: 2px 2px 2px 22px; + height: 30px; +} + +a.github { + background: url(../images/octocat-small.png) no-repeat 1px; +} + +a.buttons:hover { + color: #fff; + text-decoration: none; +} + + +/* Section - for main page content */ + +section { + width:650px; + float:right; + padding-bottom:50px; +} + + +/* Footer */ + +footer { + width:170px; + float:left; + position:fixed; + bottom:10px; + padding-left: 50px; +} + +@media print, screen and (max-width: 960px) { + + div.wrapper { + width:auto; + margin:0; + } + + header, section, footer { + float:none; + position:static; + width:auto; + } + + footer { + border-top: 1px solid #ccc; + margin:0 84px 0 50px; + padding:0; + } + + header { + padding-right:320px; + } + + section { + padding:20px 84px 20px 50px; + margin:0 0 20px; + } + + header a small { + display:inline; + } + + header ul { + position:absolute; + right:130px; + top:84px; + } +} + +@media print, screen and (max-width: 720px) { + body { + word-wrap:break-word; + } + + header { + padding:10px 20px 0; + margin-right: 0; + } + + section { + padding:10px 0 10px 20px; + margin:0 0 30px; + } + + footer { + margin: 0 0 0 30px; + } + + header ul, header p.view { + position:static; + } +} + +@media print, screen and (max-width: 480px) { + + header ul li.download { + display:none; + } + + footer { + margin: 0 0 0 20px; + } + + footer a{ + display:block; + } + +} + +@media print { + body { + padding:0.4in; + font-size:12pt; + color:#444; + } +} diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/blob.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/blob.hpp new file mode 100644 index 000000000..75101462f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/blob.hpp @@ -0,0 +1,100 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_BLOB_HPP_ +#define CAFFE_BLOB_HPP_ + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class Blob { + public: + Blob() + : num_(0), channels_(0), height_(0), width_(0), count_(0), data_(), + diff_() {} + explicit Blob(const int num, const int channels, const int height, + const int width); + void Reshape(const int num, const int channels, const int height, + const int width); + void ReshapeLike(const Blob& other); + inline int num() const { return num_; } + inline int channels() const { return channels_; } + inline int height() const { return height_; } + inline int width() const { return width_; } + inline int count() const {return count_; } + inline int offset(const int n, const int c = 0, const int h = 0, + const int w = 0) const { + CHECK_GE(n, 0); + CHECK_LE(n, num_); + CHECK_GE(channels_, 0); + CHECK_LE(c, channels_); + CHECK_GE(height_, 0); + CHECK_LE(h, height_); + CHECK_GE(width_, 0); + CHECK_LE(w, width_); + return ((n * channels_ + c) * height_ + h) * width_ + w; + } + // Copy from source. If copy_diff is false, we copy the data; if copy_diff + // is true, we copy the diff. 
+ void CopyFrom(const Blob& source, bool copy_diff = false, + bool reshape = false); + + inline Dtype data_at(const int n, const int c, const int h, + const int w) const { + return *(cpu_data() + offset(n, c, h, w)); + } + + inline Dtype diff_at(const int n, const int c, const int h, + const int w) const { + return *(cpu_diff() + offset(n, c, h, w)); + } + + inline const shared_ptr& data() const { + CHECK(data_); + return data_; + } + + inline const shared_ptr& diff() const { + CHECK(diff_); + return diff_; + } + + const Dtype* cpu_data() const; + void set_cpu_data(Dtype* data); + const Dtype* gpu_data() const; + const Dtype* cpu_diff() const; + const Dtype* gpu_diff() const; + Dtype* mutable_cpu_data(); + Dtype* mutable_gpu_data(); + Dtype* mutable_cpu_diff(); + Dtype* mutable_gpu_diff(); + void Update(); + void FromProto(const BlobProto& proto); + void ToProto(BlobProto* proto, bool write_diff = false) const; + + // Set the data_/diff_ shared_ptr to point to the SyncedMemory holding the + // data_/diff_ of Blob other -- useful in layers which simply perform a copy + // in their forward or backward pass. + // This deallocates the SyncedMemory holding this blob's data/diff, as + // shared_ptr calls its destructor when reset with the = operator. + void ShareData(const Blob& other); + void ShareDiff(const Blob& other); + + protected: + shared_ptr data_; + shared_ptr diff_; + int num_; + int channels_; + int height_; + int width_; + int count_; + + DISABLE_COPY_AND_ASSIGN(Blob); +}; // class Blob + +} // namespace caffe + +#endif // CAFFE_BLOB_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/caffe.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/caffe.hpp new file mode 100644 index 000000000..ada069547 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/caffe.hpp @@ -0,0 +1,19 @@ +// Copyright 2014 BVLC and contributors. +// caffe.hpp is the header file that you need to include in your code. It wraps +// all the internal caffe header files into one for simpler inclusion. + +#ifndef CAFFE_CAFFE_HPP_ +#define CAFFE_CAFFE_HPP_ + +#include "caffe/common.hpp" +#include "caffe/blob.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/proto/caffe.pb.h" + +#endif // CAFFE_CAFFE_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/common.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/common.hpp new file mode 100644 index 000000000..7bfa5d402 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/common.hpp @@ -0,0 +1,169 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_COMMON_HPP_ +#define CAFFE_COMMON_HPP_ + +#include +#include +#include +#include +#include // cuda driver types +#include + +// Disable the copy and assignment operator for a class. +#define DISABLE_COPY_AND_ASSIGN(classname) \ +private:\ + classname(const classname&);\ + classname& operator=(const classname&) + +// Instantiate a class with float and double specifications. +#define INSTANTIATE_CLASS(classname) \ + template class classname; \ + template class classname + +// A simple macro to mark codes that are not implemented, so that when the code +// is executed we will see a fatal log. +#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet" + +// CUDA: various checks for different function calls. 
+#define CUDA_CHECK(condition) \ + /* Code block avoids redefinition of cudaError_t error */ \ + do { \ + cudaError_t error = condition; \ + CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ + } while (0) + +#define CUBLAS_CHECK(condition) \ + do { \ + cublasStatus_t status = condition; \ + CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " \ + << caffe::cublasGetErrorString(status); \ + } while (0) + +#define CURAND_CHECK(condition) \ + do { \ + curandStatus_t status = condition; \ + CHECK_EQ(status, CURAND_STATUS_SUCCESS) << " " \ + << caffe::curandGetErrorString(status); \ + } while (0) + +// CUDA: grid stride looping +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +// CUDA: check for error after kernel execution and exit loudly if there is one. +#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError()) + +// Define not supported status for pre-6.0 compatibility. +#if CUDA_VERSION < 6000 +#define CUBLAS_STATUS_NOT_SUPPORTED 831486 +#endif + +namespace caffe { + +// We will use the boost shared_ptr instead of the new C++11 one mainly +// because cuda does not work (at least now) well with C++11 features. +using boost::shared_ptr; + + +// A singleton class to hold common caffe stuff, such as the handler that +// caffe is going to use for cublas, curand, etc. +class Caffe { + public: + ~Caffe(); + inline static Caffe& Get() { + if (!singleton_.get()) { + singleton_.reset(new Caffe()); + } + return *singleton_; + } + enum Brew { CPU, GPU }; + enum Phase { TRAIN, TEST }; + + + // This random number generator facade hides boost and CUDA rng + // implementation from one another (for cross-platform compatibility). + class RNG { + public: + RNG(); + explicit RNG(unsigned int seed); + explicit RNG(const RNG&); + RNG& operator=(const RNG&); + void* generator(); + private: + class Generator; + shared_ptr generator_; + }; + + // Getters for boost rng, curand, and cublas handles + inline static RNG& rng_stream() { + if (!Get().random_generator_) { + Get().random_generator_.reset(new RNG()); + } + return *(Get().random_generator_); + } + inline static cublasHandle_t cublas_handle() { return Get().cublas_handle_; } + inline static curandGenerator_t curand_generator() { + return Get().curand_generator_; + } + + // Returns the mode: running on CPU or GPU. + inline static Brew mode() { return Get().mode_; } + // Returns the phase: TRAIN or TEST. + inline static Phase phase() { return Get().phase_; } + // The setters for the variables + // Sets the mode. It is recommended that you don't change the mode halfway + // into the program since that may cause allocation of pinned memory being + // freed in a non-pinned way, which may cause problems - I haven't verified + // it personally but better to note it here in the header file. + inline static void set_mode(Brew mode) { Get().mode_ = mode; } + // Sets the phase. + inline static void set_phase(Phase phase) { Get().phase_ = phase; } + // Sets the random seed of both boost and curand + static void set_random_seed(const unsigned int seed); + // Sets the device. Since we have cublas and curand stuff, set device also + // requires us to reset those values. + static void SetDevice(const int device_id); + // Prints the current GPU status. 
+ static void DeviceQuery(); + + protected: + cublasHandle_t cublas_handle_; + curandGenerator_t curand_generator_; + shared_ptr random_generator_; + + Brew mode_; + Phase phase_; + static shared_ptr singleton_; + + private: + // The private constructor to avoid duplicate instantiation. + Caffe(); + + DISABLE_COPY_AND_ASSIGN(Caffe); +}; + +// NVIDIA_CUDA-5.5_Samples/common/inc/helper_cuda.h +const char* cublasGetErrorString(cublasStatus_t error); +const char* curandGetErrorString(curandStatus_t error); + +// CUDA: thread number configuration. +// Use 1024 threads per block, which requires cuda sm_2x or above, +// or fall back to attempt compatibility (best of luck to you). +#if __CUDA_ARCH__ >= 200 + const int CAFFE_CUDA_NUM_THREADS = 1024; +#else + const int CAFFE_CUDA_NUM_THREADS = 512; +#endif + +// CUDA: number of blocks for threads. +inline int CAFFE_GET_BLOCKS(const int N) { + return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; +} + + +} // namespace caffe + +#endif // CAFFE_COMMON_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/data_layers.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/data_layers.hpp new file mode 100644 index 000000000..8b580c929 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/data_layers.hpp @@ -0,0 +1,337 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include +#include +#include +#include + +#include "leveldb/db.h" +#include "lmdb.h" +#include "pthread.h" +#include "hdf5.h" +#include "boost/scoped_ptr.hpp" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +#define HDF5_DATA_DATASET_NAME "data" +#define HDF5_DATA_LABEL_NAME "label" + +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param); + virtual ~HDF5OutputLayer(); + virtual void SetUp(const vector*>& bottom, + vector*>* top) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_HDF5_OUTPUT; + } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void SaveBlobs(); + + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + + +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_HDF5_DATA; + } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* 
bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + Blob data_blob_; + Blob label_blob_; +}; + +// TODO: DataLayer, ImageDataLayer, and WindowDataLayer all have the +// same basic structure and a lot of duplicated code. + +// This function is used to create a pthread that prefetches the data. +template +void* DataLayerPrefetch(void* layer_pointer); + +template +class DataLayer : public Layer { + // The function used to perform prefetching. + friend void* DataLayerPrefetch(void* layer_pointer); + + public: + explicit DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~DataLayer(); + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_DATA; + } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + virtual unsigned int PrefetchRand(); + + shared_ptr prefetch_rng_; + + // LEVELDB + shared_ptr db_; + shared_ptr iter_; + // LMDB + MDB_env* mdb_env_; + MDB_dbi mdb_dbi_; + MDB_txn* mdb_txn_; + MDB_cursor* mdb_cursor_; + MDB_val mdb_key_, mdb_value_; + + int datum_channels_; + int datum_height_; + int datum_width_; + int datum_size_; + pthread_t thread_; + shared_ptr > prefetch_data_; + shared_ptr > prefetch_label_; + Blob data_mean_; + bool output_labels_; + Caffe::Phase phase_; +}; + +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_DUMMY_DATA; + } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + + vector > > fillers_; + vector refill_; +}; + +// This function is used to create a pthread that prefetches the data. +template +void* ImageDataLayerPrefetch(void* layer_pointer); + +template +class ImageDataLayer : public Layer { + // The function used to perform prefetching. 
+ friend void* ImageDataLayerPrefetch(void* layer_pointer); + + public: + explicit ImageDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~ImageDataLayer(); + virtual void SetUp(const vector*>& bottom, + vector*>* top); + void SetUpWithDatum(const int crop_size, const Datum datum, + vector*>* top); + virtual void AddImagesAndLabels(const vector& images, + const vector& labels); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_IMAGE_DATA; + } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + + virtual void ShuffleImages(); + + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + virtual unsigned int PrefetchRand(); + + shared_ptr prefetch_rng_; + vector > lines_; + int lines_id_; + int datum_channels_; + int datum_height_; + int datum_width_; + int datum_size_; + pthread_t thread_; + shared_ptr > prefetch_data_; + shared_ptr > prefetch_label_; + Blob data_mean_; + Caffe::Phase phase_; + bool is_datum_set_up_; + vector*>* top_; +}; + +/* MemoryDataLayer +*/ +template +class MemoryDataLayer : public Layer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_MEMORY_DATA; + } + virtual inline int ExactNumBottomBlobs() { return 0; } + virtual inline int ExactNumTopBlobs() { return 2; } + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + int datum_channels() { return datum_channels_; } + int datum_height() { return datum_height_; } + int datum_width() { return datum_width_; } + int batch_size() { return batch_size_; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + + Dtype* data_; + Dtype* labels_; + int datum_channels_; + int datum_height_; + int datum_width_; + int datum_size_; + int batch_size_; + int n_; + int pos_; +}; + +// This function is used to create a pthread that prefetches the window data. +template +void* WindowDataLayerPrefetch(void* layer_pointer); + +template +class WindowDataLayer : public Layer { + // The function used to perform prefetching. 
+ friend void* WindowDataLayerPrefetch(void* layer_pointer); + + public: + explicit WindowDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~WindowDataLayer(); + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_WINDOW_DATA; + } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { return; } + + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + virtual unsigned int PrefetchRand(); + + shared_ptr prefetch_rng_; + pthread_t thread_; + shared_ptr > prefetch_data_; + shared_ptr > prefetch_label_; + Blob data_mean_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/filler.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/filler.hpp new file mode 100644 index 000000000..242f11a35 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/filler.hpp @@ -0,0 +1,173 @@ +// Copyright 2014 BVLC and contributors. + +// Fillers are random number generators that fills a blob using the specified +// algorithm. The expectation is that they are only going to be used during +// initialization time and will not involve any GPUs. 
+ +#ifndef CAFFE_FILLER_HPP +#define CAFFE_FILLER_HPP + +#include + +#include "caffe/common.hpp" +#include "caffe/blob.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class Filler { + public: + explicit Filler(const FillerParameter& param) : filler_param_(param) {} + virtual ~Filler() {} + virtual void Fill(Blob* blob) = 0; + protected: + FillerParameter filler_param_; +}; // class Filler + + +template +class ConstantFiller : public Filler { + public: + explicit ConstantFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + const int count = blob->count(); + const Dtype value = this->filler_param_.value(); + CHECK(count); + for (int i = 0; i < count; ++i) { + data[i] = value; + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +template +class UniformFiller : public Filler { + public: + explicit UniformFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), + Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +template +class GaussianFiller : public Filler { + public: + explicit GaussianFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + CHECK(blob->count()); + caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), + Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + int sparse = this->filler_param_.sparse(); + CHECK_GE(sparse, -1); + if (sparse >= 0) { + // Sparse initialization is implemented for "weight" blobs; i.e. matrices. + // These have num == channels == 1; height is number of inputs; width is + // number of outputs. The 'sparse' variable specifies the mean number + // of non-zero input weights for a given output. 
+ CHECK_EQ(blob->num(), 1); + CHECK_EQ(blob->channels(), 1); + int num_inputs = blob->height(); + Dtype non_zero_probability = Dtype(sparse) / Dtype(num_inputs); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); + for (int i = 0; i < blob->count(); ++i) { + data[i] *= mask[i]; + } + } + } + + protected: + shared_ptr rand_vec_; +}; + +template +class PositiveUnitballFiller : public Filler { + public: + explicit PositiveUnitballFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + DCHECK(blob->count()); + caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); + // We expect the filler to not be called very frequently, so we will + // just use a simple implementation + int dim = blob->count() / blob->num(); + CHECK(dim); + for (int i = 0; i < blob->num(); ++i) { + Dtype sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + for (int j = 0; j < dim; ++j) { + data[i * dim + j] /= sum; + } + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +// A filler based on the paper [Bengio and Glorot 2010]: Understanding +// the difficulty of training deep feedforward neuralnetworks, but does not +// use the fan_out value. +// +// It fills the incoming matrix by randomly sampling uniform data from +// [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number +// of input nodes. You should make sure the input blob has shape (num, a, b, c) +// where a * b * c = fan_in. +template +class XavierFiller : public Filler { + public: + explicit XavierFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + Dtype scale = sqrt(Dtype(3) / fan_in); + caffe_rng_uniform(blob->count(), -scale, scale, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + + +// A function to get a specific filler from the specification given in +// FillerParameter. Ideally this would be replaced by a factory pattern, +// but we will leave it this way for now. +template +Filler* GetFiller(const FillerParameter& param) { + const std::string& type = param.type(); + if (type == "constant") { + return new ConstantFiller(param); + } else if (type == "gaussian") { + return new GaussianFiller(param); + } else if (type == "positive_unitball") { + return new PositiveUnitballFiller(param); + } else if (type == "uniform") { + return new UniformFiller(param); + } else if (type == "xavier") { + return new XavierFiller(param); + } else { + CHECK(false) << "Unknown filler name: " << param.type(); + } + return (Filler*)(NULL); +} + +} // namespace caffe + +#endif // CAFFE_FILLER_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/layer.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/layer.hpp new file mode 100644 index 000000000..af3d5441b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/layer.hpp @@ -0,0 +1,206 @@ +// Copyright 2014 BVLC and contributors. 
+ +#ifndef CAFFE_LAYER_H_ +#define CAFFE_LAYER_H_ + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::string; +using std::vector; + +namespace caffe { + +template +class Layer { + public: + // You should not implement your own constructor. Any set up code should go + // to SetUp(), where the dimensions of the bottom blobs are provided to the + // layer. + explicit Layer(const LayerParameter& param) + : layer_param_(param) { + // The only thing we do is to copy blobs if there are any. + if (layer_param_.blobs_size() > 0) { + blobs_.resize(layer_param_.blobs_size()); + for (int i = 0; i < layer_param_.blobs_size(); ++i) { + blobs_[i].reset(new Blob()); + blobs_[i]->FromProto(layer_param_.blobs(i)); + } + } + } + virtual ~Layer() {} + // SetUp: your function should implement this, and call Layer::SetUp for + // common SetUp functionality. + virtual void SetUp(const vector*>& bottom, + vector*>* top) { + CheckBlobCounts(bottom, *top); + } + + // Forward and backward wrappers. You should implement the cpu and + // gpu specific implementations instead, and should not change these + // functions. + inline Dtype Forward(const vector*>& bottom, + vector*>* top); + inline void Backward(const vector*>& top, + const bool propagate_down, + vector*>* bottom); + + // Returns the vector of blobs. + vector > >& blobs() { + return blobs_; + } + + // Returns the layer parameter + const LayerParameter& layer_param() { return layer_param_; } + // Writes the layer parameter to a protocol buffer + virtual void ToProto(LayerParameter* param, bool write_diff = false); + + // Returns the layer type as an enum value. + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_NONE; + } + + // Returns the layer type name. + virtual inline const string& type_name() const { + return LayerParameter_LayerType_Name(type()); + } + + // These methods can be overwritten to declare that this layer type expects + // a certain number of blobs as input and output. + // + // ExactNum{Bottom,Top}Blobs return a non-negative number to require an exact + // number of bottom/top blobs; the Min/Max versions return a non-negative + // number to require a minimum and/or maximum number of blobs. + // If Exact is specified, neither Min nor Max should be specified, and vice + // versa. These methods may not rely on SetUp having been called. + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return -1; } + virtual inline int MaxBottomBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return -1; } + virtual inline int MaxTopBlobs() const { return -1; } + + protected: + // The protobuf that stores the layer parameters + LayerParameter layer_param_; + // The vector that stores the parameters as a set of blobs. + vector > > blobs_; + + // Forward functions: compute the layer output + // (and loss layers return the loss; other layers return the dummy value 0.) + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top) = 0; + // If no gpu code is provided, we will simply use cpu code. + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top) { + // LOG(WARNING) << "Using CPU code as backup."; + return Forward_cpu(bottom, top); + } + + // Backward functions: compute the gradients for any parameters and + // for the bottom blobs if propagate_down is true. 
+ virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) = 0; + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + // LOG(WARNING) << "Using CPU code as backup."; + Backward_cpu(top, propagate_down, bottom); + } + + // CheckBlobCounts: called by the parent Layer's SetUp to check that the + // number of bottom and top Blobs provided as input match the expected + // numbers specified by the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions. + virtual void CheckBlobCounts(const vector*>& bottom, + const vector*>& top) { + if (ExactNumBottomBlobs() >= 0) { + CHECK_EQ(ExactNumBottomBlobs(), bottom.size()) + << type_name() << " Layer takes " << ExactNumBottomBlobs() + << " bottom blob(s) as input."; + } + if (MinBottomBlobs() >= 0) { + CHECK_LE(MinBottomBlobs(), bottom.size()) + << type_name() << " Layer takes at least " << MinBottomBlobs() + << " bottom blob(s) as input."; + } + if (MaxBottomBlobs() >= 0) { + CHECK_GE(MaxBottomBlobs(), bottom.size()) + << type_name() << " Layer takes at most " << MaxBottomBlobs() + << " bottom blob(s) as input."; + } + if (ExactNumTopBlobs() >= 0) { + CHECK_EQ(ExactNumTopBlobs(), top.size()) + << type_name() << " Layer produces " << ExactNumTopBlobs() + << " top blob(s) as output."; + } + if (MinTopBlobs() >= 0) { + CHECK_LE(MinTopBlobs(), top.size()) + << type_name() << " Layer produces at least " << MinTopBlobs() + << " top blob(s) as output."; + } + if (MaxTopBlobs() >= 0) { + CHECK_GE(MaxTopBlobs(), top.size()) + << type_name() << " Layer produces at most " << MaxTopBlobs() + << " top blob(s) as output."; + } + } + + DISABLE_COPY_AND_ASSIGN(Layer); +}; // class Layer + +// Forward and backward wrappers. You should implement the cpu and +// gpu specific implementations instead, and should not change these +// functions. +template +inline Dtype Layer::Forward(const vector*>& bottom, + vector*>* top) { + switch (Caffe::mode()) { + case Caffe::CPU: + return Forward_cpu(bottom, top); + case Caffe::GPU: + return Forward_gpu(bottom, top); + default: + LOG(FATAL) << "Unknown caffe mode."; + return Dtype(0); + } +} + +template +inline void Layer::Backward(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + switch (Caffe::mode()) { + case Caffe::CPU: + Backward_cpu(top, propagate_down, bottom); + break; + case Caffe::GPU: + Backward_gpu(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } +} + +// Serialize LayerParameter to protocol buffer +template +void Layer::ToProto(LayerParameter* param, bool write_diff) { + param->Clear(); + param->CopyFrom(layer_param_); + param->clear_blobs(); + for (int i = 0; i < blobs_.size(); ++i) { + blobs_[i]->ToProto(param->add_blobs(), write_diff); + } +} + +// The layer factory function +template +Layer* GetLayer(const LayerParameter& param); + +} // namespace caffe + +#endif // CAFFE_LAYER_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/loss_layers.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/loss_layers.hpp new file mode 100644 index 000000000..381bf0f4f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/loss_layers.hpp @@ -0,0 +1,198 @@ +// Copyright 2014 BVLC and contributors. 
+ +#ifndef CAFFE_LOSS_LAYERS_HPP_ +#define CAFFE_LOSS_LAYERS_HPP_ + +#include +#include +#include + +#include "leveldb/db.h" +#include "pthread.h" +#include "boost/scoped_ptr.hpp" +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +const float kLOG_THRESHOLD = 1e-20; + +/* LossLayer + Takes two inputs of same num (a and b), and has no output. + The gradient is propagated to a. +*/ +template +class LossLayer : public Layer { + public: + explicit LossLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp( + const vector*>& bottom, vector*>* top); + virtual void FurtherSetUp( + const vector*>& bottom, vector*>* top) {} + + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } +}; + +/* SigmoidCrossEntropyLossLayer +*/ +template +class SigmoidCrossEntropyLossLayer : public LossLayer { + public: + explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) + : LossLayer(param), + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} + virtual void FurtherSetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + shared_ptr > sigmoid_layer_; + // sigmoid_output stores the output of the sigmoid layer. + shared_ptr > sigmoid_output_; + // Vector holders to call the underlying sigmoid layer forward and backward. + vector*> sigmoid_bottom_vec_; + vector*> sigmoid_top_vec_; +}; + +/* EuclideanLossLayer + Compute the L_2 distance between the two inputs. 
+ + loss = (1/2 \sum_i (a_i - b_i)^2) + a' = 1/I (a - b) +*/ +template +class EuclideanLossLayer : public LossLayer { + public: + explicit EuclideanLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void FurtherSetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_EUCLIDEAN_LOSS; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + Blob diff_; +}; + +/* InfogainLossLayer +*/ +template +class InfogainLossLayer : public LossLayer { + public: + explicit InfogainLossLayer(const LayerParameter& param) + : LossLayer(param), infogain_() {} + virtual void FurtherSetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_INFOGAIN_LOSS; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + Blob infogain_; +}; + +/* HingeLossLayer +*/ +template +class HingeLossLayer : public LossLayer { + public: + explicit HingeLossLayer(const LayerParameter& param) + : LossLayer(param) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_HINGE_LOSS; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* MultinomialLogisticLossLayer +*/ +template +class MultinomialLogisticLossLayer : public LossLayer { + public: + explicit MultinomialLogisticLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void FurtherSetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* AccuracyLayer + Note: not an actual loss layer! Does not implement backwards step. + Computes the accuracy and logprob of a with respect to b. +*/ +template +class AccuracyLayer : public Layer { + public: + explicit AccuracyLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_ACCURACY; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + NOT_IMPLEMENTED; + } +}; + +/* Also see +- SoftmaxWithLossLayer in vision_layers.hpp +*/ + +} // namespace caffe + +#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/net.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/net.hpp new file mode 100644 index 000000000..5b5dd22a2 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/net.hpp @@ -0,0 +1,157 @@ +// Copyright 2014 BVLC and contributors. 
+ +#ifndef CAFFE_NET_HPP_ +#define CAFFE_NET_HPP_ + +#include +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::map; +using std::vector; +using std::set; +using std::string; + +namespace caffe { + + +template +class Net { + public: + explicit Net(const NetParameter& param); + explicit Net(const string& param_file); + virtual ~Net() {} + + // Initialize a network with the network parameter. + void Init(const NetParameter& param); + + // Run forward with the input blobs already fed separately. You can get the + // input blobs using input_blobs(). + const vector*>& ForwardPrefilled(Dtype* loss = NULL); + // Run forward using a set of bottom blobs, and return the result. + const vector*>& Forward(const vector* > & bottom, + Dtype* loss = NULL); + // Run forward using a serialized BlobProtoVector and return the result + // as a serialized BlobProtoVector + string Forward(const string& input_blob_protos, Dtype* loss = NULL); + + // The network backward should take no input and output, since it solely + // computes the gradient w.r.t the parameters, and the data has already + // been provided during the forward pass. + void Backward(); + + Dtype ForwardBackward(const vector* > & bottom) { + Dtype loss; + Forward(bottom, &loss); + Backward(); + return loss; + } + + // Updates the network weights based on the diff values computed. + void Update(); + + // For an already initialized net, ShareTrainedLayersWith() implicitly copies + // (i.e., using no additional memory) the already trained layers from another + // Net. + void ShareTrainedLayersWith(Net* other); + // For an already initialized net, CopyTrainedLayersFrom() copies the already + // trained layers from another net parameter instance. + void CopyTrainedLayersFrom(const NetParameter& param); + void CopyTrainedLayersFrom(const string trained_filename); + // Writes the net to a proto. + void ToProto(NetParameter* param, bool write_diff = false); + + // returns the network name. + inline const string& name() { return name_; } + // returns the layer names + inline const vector& layer_names() { return layer_names_; } + // returns the blob names + inline const vector& blob_names() { return blob_names_; } + // returns the blobs + inline const vector > >& blobs() { return blobs_; } + // returns the layers + inline const vector > >& layers() { return layers_; } + // returns the bottom and top vecs for each layer - usually you won't need + // this unless you do per-layer checks such as gradients. 
+ inline vector*> >& bottom_vecs() { return bottom_vecs_; } + inline vector*> >& top_vecs() { return top_vecs_; } + // returns the parameters + inline vector > >& params() { return params_; } + // returns the parameter learning rate multipliers + inline vector& params_lr() {return params_lr_; } + inline vector& params_weight_decay() { return params_weight_decay_; } + // Input and output blob numbers + inline int num_inputs() { return net_input_blobs_.size(); } + inline int num_outputs() { return net_output_blobs_.size(); } + inline vector*>& input_blobs() { return net_input_blobs_; } + inline vector*>& output_blobs() { return net_output_blobs_; } + inline vector& input_blob_indices() { return net_input_blob_indices_; } + inline vector& output_blob_indices() { return net_output_blob_indices_; } + // has_blob and blob_by_name are inspired by + // https://github.com/kencoken/caffe/commit/f36e71569455c9fbb4bf8a63c2d53224e32a4e7b + // Access intermediary computation layers, testing with centre image only + bool has_blob(const string& blob_name); + const shared_ptr > blob_by_name(const string& blob_name); + bool has_layer(const string& layer_name); + const shared_ptr > layer_by_name(const string& layer_name); + + protected: + // Helpers for Init. + // Append a new input or top blob to the net. + void AppendTop(const NetParameter& param, const int layer_id, + const int top_id, set* available_blobs, + map* blob_name_to_idx); + // Append a new bottom blob to the net. + int AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx); + // Function to get misc parameters, e.g. the learning rate multiplier and + // weight decay. + void GetLearningRateAndWeightDecay(); + + // Individual layers in the net + vector > > layers_; + vector layer_names_; + map layer_names_index_; + vector layer_need_backward_; + // blobs stores the blobs that store intermediate results between the + // layers. + vector > > blobs_; + vector blob_names_; + map blob_names_index_; + vector blob_need_backward_; + // bottom_vecs stores the vectors containing the input for each layer. + // They don't actually host the blobs (blobs_ does), so we simply store + // pointers. + vector*> > bottom_vecs_; + vector > bottom_id_vecs_; + // top_vecs stores the vectors containing the output for each layer + vector*> > top_vecs_; + vector > top_id_vecs_; + // blob indices for the input and the output of the net + vector net_input_blob_indices_; + vector net_output_blob_indices_; + vector*> net_input_blobs_; + vector*> net_output_blobs_; + string name_; + // The parameters in the network. + vector > > params_; + // the learning rate multipliers + vector params_lr_; + // the weight decay multipliers + vector params_weight_decay_; + // The bytes of memory used by this net + size_t memory_used_; + DISABLE_COPY_AND_ASSIGN(Net); +}; + + +} // namespace caffe + +#endif // CAFFE_NET_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/neuron_layers.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/neuron_layers.hpp new file mode 100644 index 000000000..ed664df75 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/neuron_layers.hpp @@ -0,0 +1,272 @@ +// Copyright 2014 BVLC and contributors. 
+ +#ifndef CAFFE_NEURON_LAYERS_HPP_ +#define CAFFE_NEURON_LAYERS_HPP_ + +#include +#include +#include + +#include "leveldb/db.h" +#include "pthread.h" +#include "boost/scoped_ptr.hpp" +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#define HDF5_DATA_DATASET_NAME "data" +#define HDF5_DATA_LABEL_NAME "label" + +namespace caffe { + +/* NeuronLayer + An interface for layers that take one blob as input (x), + and produce one blob as output (y). +*/ +template +class NeuronLayer : public Layer { + public: + explicit NeuronLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_NONE; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } +}; + +/* BNLLLayer + + y = x + log(1 + exp(-x)) if x > 0 + y = log(1 + exp(x)) if x <= 0 + + y' = exp(x) / (exp(x) + 1) +*/ +template +class BNLLLayer : public NeuronLayer { + public: + explicit BNLLLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_BNLL; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* DropoutLayer + During training only, sets some portion of x to 0, adjusting the + vector magnitude accordingly. 
+ + mask = bernoulli(1 - threshold) + scale = 1 / (1 - threshold) + y = x * mask * scale + + y' = mask * scale +*/ +template +class DropoutLayer : public NeuronLayer { + public: + explicit DropoutLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_DROPOUT; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + shared_ptr > rand_vec_; + Dtype threshold_; + Dtype scale_; + unsigned int uint_thres_; +}; + +/* PowerLayer + y = (shift + scale * x) ^ power + + y' = scale * power * (shift + scale * x) ^ (power - 1) + = scale * power * y / (shift + scale * x) +*/ +template +class PowerLayer : public NeuronLayer { + public: + explicit PowerLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_POWER; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + Dtype power_; + Dtype scale_; + Dtype shift_; + Dtype diff_scale_; +}; + +/* ReLULayer + Rectified Linear Unit non-linearity. + The simple max is fast to compute, and the function does not saturate. + + y = max(0, x). + + y' = 0 if x < 0 + y' = 1 if x > 0 +*/ +template +class ReLULayer : public NeuronLayer { + public: + explicit ReLULayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_RELU; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* SigmoidLayer + Sigmoid function non-linearity, a classic choice in neural networks. + Note that the gradient vanishes as the values move away from 0. + The ReLULayer is often a better choice for this reason. + + y = 1. / (1 + exp(-x)) + + y ' = exp(x) / (1 + exp(x))^2 + or + y' = y * (1 - y) +*/ +template +class SigmoidLayer : public NeuronLayer { + public: + explicit SigmoidLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_SIGMOID; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* TanHLayer + Hyperbolic tangent non-linearity, popular in auto-encoders. + + y = 1. 
* (exp(2x) - 1) / (exp(2x) + 1) + + y' = 1 - ( (exp(2x) - 1) / (exp(2x) + 1) ) ^ 2 +*/ +template +class TanHLayer : public NeuronLayer { + public: + explicit TanHLayer(const LayerParameter& param) + : NeuronLayer(param) {} + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_TANH; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); +}; + +/* ThresholdLayer + Outputs 1 if value in input is above threshold, 0 otherwise. + The default threshold = 0, which means positive values become 1 and + negative or zero values become 0 + + y = 1 if x > threshold + y = 0 if x <= threshold + + y' is not differentiable +*/ +template +class ThresholdLayer : public NeuronLayer { + public: + explicit ThresholdLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_THRESHOLD; + } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + NOT_IMPLEMENTED; + } + + Dtype threshold_; +}; + +} // namespace caffe + +#endif // CAFFE_NEURON_LAYERS_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/solver.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/solver.hpp new file mode 100644 index 000000000..3112c59e0 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/solver.hpp @@ -0,0 +1,77 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_OPTIMIZATION_SOLVER_HPP_ +#define CAFFE_OPTIMIZATION_SOLVER_HPP_ + +#include +#include + +namespace caffe { + +template +class Solver { + public: + explicit Solver(const SolverParameter& param); + explicit Solver(const string& param_file); + void Init(const SolverParameter& param); + // The main entry of the solver function. By default, iter will be zero. Pass + // in a non-zero iter number to resume training for a pre-trained net. + virtual void Solve(const char* resume_file = NULL); + inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } + virtual ~Solver() {} + inline shared_ptr > net() { return net_; } + + protected: + // PreSolve is run before any solving iteration starts, allowing one to + // put up some scaffold. + virtual void PreSolve() {} + // Get the update value for the current iteration. + virtual void ComputeUpdateValue() = 0; + // The Solver::Snapshot function implements the basic snapshotting utility + // that stores the learned net. You should implement the SnapshotSolverState() + // function that produces a SolverState protocol buffer that needs to be + // written to disk together with the learned net. + void Snapshot(); + // The test routine + void TestAll(); + void Test(const int test_net_id = 0); + virtual void SnapshotSolverState(SolverState* state) = 0; + // The Restore function implements how one should restore the solver to a + // previously snapshotted state. You should implement the RestoreSolverState() + // function that restores the state from a SolverState protocol buffer.
+ void Restore(const char* resume_file); + virtual void RestoreSolverState(const SolverState& state) = 0; + + SolverParameter param_; + int iter_; + shared_ptr > net_; + vector > > test_nets_; + + DISABLE_COPY_AND_ASSIGN(Solver); +}; + + +template +class SGDSolver : public Solver { + public: + explicit SGDSolver(const SolverParameter& param) + : Solver(param) {} + explicit SGDSolver(const string& param_file) + : Solver(param_file) {} + + protected: + virtual void PreSolve(); + Dtype GetLearningRate(); + virtual void ComputeUpdateValue(); + virtual void SnapshotSolverState(SolverState * state); + virtual void RestoreSolverState(const SolverState& state); + // history maintains the historical momentum data. + vector > > history_; + + DISABLE_COPY_AND_ASSIGN(SGDSolver); +}; + + +} // namespace caffe + +#endif // CAFFE_OPTIMIZATION_SOLVER_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/syncedmem.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/syncedmem.hpp new file mode 100644 index 000000000..bed55c380 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/syncedmem.hpp @@ -0,0 +1,67 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_SYNCEDMEM_HPP_ +#define CAFFE_SYNCEDMEM_HPP_ + +#include + +#include "caffe/common.hpp" + +namespace caffe { + +// Theoretically, CaffeMallocHost and CaffeFreeHost should simply call the +// cudaMallocHost and cudaFree functions in order to create pinned memory. +// However, those codes rely on the existence of a cuda GPU (I don't know +// why that is a must since allocating memory should not be accessing the +// GPU resorce, but it just creates an error as of Cuda 5.0) and will cause +// problem when running on a machine without GPU. Thus, we simply define +// these two functions for safety and possible future change if the problem +// of calling cuda functions disappears in a future version. +// +// In practice, although we are creating unpinned memory here, as long as we +// are constantly accessing them the memory pages almost always stays in +// the physical memory (assuming we have large enough memory installed), and +// does not seem to create a memory bottleneck here. + +inline void CaffeMallocHost(void** ptr, size_t size) { + *ptr = malloc(size); +} + +inline void CaffeFreeHost(void* ptr) { + free(ptr); +} + + +class SyncedMemory { + public: + SyncedMemory() + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false) {} + explicit SyncedMemory(size_t size) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), + own_cpu_data_(false) {} + ~SyncedMemory(); + const void* cpu_data(); + void set_cpu_data(void* data); + const void* gpu_data(); + void* mutable_cpu_data(); + void* mutable_gpu_data(); + enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; + SyncedHead head() { return head_; } + size_t size() { return size_; } + + private: + void to_cpu(); + void to_gpu(); + void* cpu_ptr_; + void* gpu_ptr_; + size_t size_; + SyncedHead head_; + bool own_cpu_data_; + + DISABLE_COPY_AND_ASSIGN(SyncedMemory); +}; // class SyncedMemory + +} // namespace caffe + +#endif // CAFFE_SYNCEDMEM_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/benchmark.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/benchmark.hpp new file mode 100644 index 000000000..1d26314c6 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/benchmark.hpp @@ -0,0 +1,39 @@ +// Copyright 2014 BVLC and contributors. 
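SyncedMemory (declared in syncedmem.hpp above) allocates lazily on first access and tracks where the freshest copy of the data lives via its head() state. A small illustrative sketch of those transitions (not part of the original sources; the gpu_data() call assumes a CUDA-capable device is available):

    #include <glog/logging.h>

    #include "caffe/syncedmem.hpp"

    void synced_memory_demo() {
      caffe::SyncedMemory mem(16 * sizeof(float));  // nothing allocated yet: head() == UNINITIALIZED
      float* host = static_cast<float*>(mem.mutable_cpu_data());  // host alloc, head() == HEAD_AT_CPU
      host[0] = 3.0f;
      mem.gpu_data();           // device alloc + host-to-device copy, head() == SYNCED
      mem.mutable_cpu_data();   // marks the host copy as freshest again: head() == HEAD_AT_CPU
      CHECK_EQ(mem.head(), caffe::SyncedMemory::HEAD_AT_CPU);
      CHECK_EQ(mem.size(), 16 * sizeof(float));
    }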
+ +#ifndef CAFFE_UTIL_BENCHMARK_H_ +#define CAFFE_UTIL_BENCHMARK_H_ + +#include +#include + +namespace caffe { + +class Timer { + public: + Timer(); + virtual ~Timer(); + void Start(); + void Stop(); + float MilliSeconds(); + float Seconds(); + + inline bool initted() { return initted_; } + inline bool running() { return running_; } + inline bool has_run_at_least_once() { return has_run_at_least_once_; } + + protected: + void Init(); + + bool initted_; + bool running_; + bool has_run_at_least_once_; + cudaEvent_t start_gpu_; + cudaEvent_t stop_gpu_; + boost::posix_time::ptime start_cpu_; + boost::posix_time::ptime stop_cpu_; + float elapsed_milliseconds_; +}; + +} // namespace caffe + +#endif // CAFFE_UTIL_BENCHMARK_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/format.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/format.hpp new file mode 100644 index 000000000..fa3d89caf --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/format.hpp @@ -0,0 +1,25 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_UTIL_FORMAT_H_ +#define CAFFE_UTIL_FORMAT_H_ + +#include +#include + +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +bool OpenCVImageToDatum( + const cv::Mat& image, const int label, const int height, + const int width, const bool is_color, Datum* datum); + +inline bool OpenCVImageToDatum( + const cv::Mat& image, const int label, const int height, + const int width, Datum* datum) { + return OpenCVImageToDatum(image, label, height, width, true, datum); +} + +} // namespace caffe + +#endif // CAFFE_UTIL_FORMAT_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/im2col.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/im2col.hpp new file mode 100644 index 000000000..a649d8cc4 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/im2col.hpp @@ -0,0 +1,30 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef _CAFFE_UTIL_IM2COL_HPP_ +#define _CAFFE_UTIL_IM2COL_HPP_ + +namespace caffe { + +template +void im2col_cpu(const Dtype* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col); + +template +void col2im_cpu(const Dtype* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, Dtype* data_im); + +template +void im2col_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col); + +template +void col2im_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, Dtype* data_im); + +} // namespace caffe + +#endif // CAFFE_UTIL_IM2COL_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/insert_splits.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/insert_splits.hpp new file mode 100644 index 000000000..e25cdd7fa --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/insert_splits.hpp @@ -0,0 +1,31 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ +#define _CAFFE_UTIL_INSERT_SPLITS_HPP_ + +#include + +#include "caffe/proto/caffe.pb.h" + +using std::pair; +using std::string; + +namespace caffe { + +// Copy NetParameters with SplitLayers added to replace any shared bottom +// blobs with unique bottom blobs provided by the SplitLayer. 
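A sketch of how the split-insertion utility declared just below is typically driven (illustrative only, not from the original sources; "train.prototxt" is a hypothetical net definition in which two layers consume the same bottom blob):

    #include "caffe/common.hpp"
    #include "caffe/proto/caffe.pb.h"
    #include "caffe/util/insert_splits.hpp"
    #include "caffe/util/io.hpp"

    void insert_splits_demo() {
      caffe::NetParameter in_param;
      caffe::NetParameter split_param;
      caffe::ReadProtoFromTextFileOrDie("train.prototxt", &in_param);  // hypothetical path
      caffe::InsertSplits(in_param, &split_param);
      // split_param now contains an extra SPLIT layer for each blob that had more
      // than one consumer; each consumer is re-pointed at its own uniquely named
      // copy (the generated names come from SplitLayerName()/SplitBlobName()).
      LOG(INFO) << "layers before: " << in_param.layers_size()
                << "  after: " << split_param.layers_size();
    }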
+void InsertSplits(const NetParameter& param, NetParameter* param_split); + +void ConfigureSplitLayer(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_count, + LayerParameter* split_layer_param); + +string SplitLayerName(const string& layer_name, const string& blob_name, + const int blob_idx); + +string SplitBlobName(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_idx); + +} // namespace caffe + +#endif // CAFFE_UTIL_INSERT_SPLITS_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/io.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/io.hpp new file mode 100644 index 000000000..4458096ef --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/io.hpp @@ -0,0 +1,93 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_UTIL_IO_H_ +#define CAFFE_UTIL_IO_H_ + +#include + +#include "google/protobuf/message.h" +#include "hdf5.h" +#include "hdf5_hl.h" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/blob.hpp" + +using std::string; +using ::google::protobuf::Message; + +#define HDF5_NUM_DIMS 4 + +namespace caffe { + +bool ReadProtoFromTextFile(const char* filename, Message* proto); + +inline bool ReadProtoFromTextFile(const string& filename, Message* proto) { + return ReadProtoFromTextFile(filename.c_str(), proto); +} + +inline void ReadProtoFromTextFileOrDie(const char* filename, Message* proto) { + CHECK(ReadProtoFromTextFile(filename, proto)); +} + +inline void ReadProtoFromTextFileOrDie(const string& filename, Message* proto) { + ReadProtoFromTextFileOrDie(filename.c_str(), proto); +} + +void WriteProtoToTextFile(const Message& proto, const char* filename); +inline void WriteProtoToTextFile(const Message& proto, const string& filename) { + WriteProtoToTextFile(proto, filename.c_str()); +} + +bool ReadProtoFromBinaryFile(const char* filename, Message* proto); + +inline bool ReadProtoFromBinaryFile(const string& filename, Message* proto) { + return ReadProtoFromBinaryFile(filename.c_str(), proto); +} + +inline void ReadProtoFromBinaryFileOrDie(const char* filename, Message* proto) { + CHECK(ReadProtoFromBinaryFile(filename, proto)); +} + +inline void ReadProtoFromBinaryFileOrDie(const string& filename, + Message* proto) { + ReadProtoFromBinaryFileOrDie(filename.c_str(), proto); +} + + +void WriteProtoToBinaryFile(const Message& proto, const char* filename); +inline void WriteProtoToBinaryFile( + const Message& proto, const string& filename) { + WriteProtoToBinaryFile(proto, filename.c_str()); +} + +bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, const bool is_color, Datum* datum); + +inline bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, Datum* datum) { + return ReadImageToDatum(filename, label, height, width, true, datum); +} + +inline bool ReadImageToDatum(const string& filename, const int label, + Datum* datum) { + return ReadImageToDatum(filename, label, 0, 0, datum); +} + + +template +void hdf5_load_nd_dataset_helper( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob); + +template +void hdf5_load_nd_dataset( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob); + +template +void hdf5_save_nd_dataset( + const hid_t file_id, const string dataset_name, const Blob& blob); + +} // namespace caffe + +#endif // CAFFE_UTIL_IO_H_ diff --git 
a/modules/dnns_easily_fooled/caffe/include/caffe/util/math_functions.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/math_functions.hpp new file mode 100644 index 000000000..995199742 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/math_functions.hpp @@ -0,0 +1,253 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_UTIL_MATH_FUNCTIONS_H_ +#define CAFFE_UTIL_MATH_FUNCTIONS_H_ + +#include +#include +#include // for std::fabs and std::signbit + +#include "glog/logging.h" + +#include "caffe/util/mkl_alternate.hpp" + +namespace caffe { + +// Decaf gemm provides a simpler interface to the gemm functions, with the +// limitation that the data has to be contiguous in memory. +template +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, + Dtype* C); + +// Decaf gpu gemm provides an interface that is almost the same as the cpu +// gemm function - following the c convention and calling the fortran-order +// gpu code under the hood. +template +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta, + Dtype* C); + +template +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, + Dtype* y); + +template +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, + const Dtype alpha, const Dtype* A, const Dtype* x, const Dtype beta, + Dtype* y); + +template +void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, + Dtype* Y); + +template +void caffe_gpu_axpy(const int N, const Dtype alpha, const Dtype* X, + Dtype* Y); + +template +void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, + const Dtype beta, Dtype* Y); + +template +void caffe_gpu_axpby(const int N, const Dtype alpha, const Dtype* X, + const Dtype beta, Dtype* Y); + +template +void caffe_copy(const int N, const Dtype *X, Dtype *Y); + +template +void caffe_set(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_set(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_copy(const int N, const Dtype *X, Dtype *Y); + +template +void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_scal(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X); + +template +void caffe_sqr(const int N, const Dtype* a, Dtype* y); + +template +void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_gpu_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); + +template +void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); + +template +void 
caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); + +unsigned int caffe_rng_rand(); + +template +Dtype caffe_nextafter(const Dtype b); + +template +void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); + +// caffe_gpu_rng_uniform with two arguments generates integers in the range +// [0, UINT_MAX]. +void caffe_gpu_rng_uniform(const int n, unsigned int* r); + +// caffe_gpu_rng_uniform with four arguments generates floats in the range +// (a, b] (strictly greater than a, less than or equal to b) due to the +// specification of curandGenerateUniform. With a = 0, b = 1, just calls +// curandGenerateUniform; with other limits will shift and scale the outputs +// appropriately after calling curandGenerateUniform. +template +void caffe_gpu_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); + +template +void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, + Dtype* r); + +template +void caffe_gpu_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, + Dtype* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, int* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); + +template +void caffe_gpu_rng_bernoulli(const int n, const Dtype p, int* r); + +template +void caffe_exp(const int n, const Dtype* a, Dtype* y); + +template +Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y); + +template +void caffe_gpu_dot(const int n, const Dtype* x, const Dtype* y, Dtype* out); + +template +int caffe_cpu_hamming_distance(const int n, const Dtype* x, const Dtype* y); + +template +uint32_t caffe_gpu_hamming_distance(const int n, const Dtype* x, + const Dtype* y); + +// Returns the sum of the absolute values of the elements of vector x +template +Dtype caffe_cpu_asum(const int n, const Dtype* x); + +template +void caffe_gpu_asum(const int n, const Dtype* x, Dtype* y); + +// the branchless, type-safe version from +// http://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c +template +inline char caffe_sign(Dtype val) { + return (Dtype(0) < val) - (val < Dtype(0)); +} + +// The following two macros are modifications of DEFINE_VSL_UNARY_FUNC +// in include/caffe/util/mkl_alternate.hpp authored by @Rowland Depp. +// Please refer to commit 7e8ef25c7 of the boost-eigen branch. +// Git cherry picking that commit caused a conflict hard to resolve and +// copying that file in convenient for code reviewing. +// So they have to be pasted here temporarily. 
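A quick standalone check of the branchless sign trick used by caffe_sign() above (a sketch, not part of the original header): the two bool comparisons convert to 0 or 1, so their difference is exactly the sign. The macros defined next stamp out element-wise array versions of such scalar operations (caffe_cpu_sign, caffe_cpu_sgnbit, caffe_cpu_fabs).

    #include <cassert>

    inline int sign_demo(double v) {
      return (0.0 < v) - (v < 0.0);  // evaluates to 1 - 0, 0 - 0, or 0 - 1
    }

    void check_sign_demo() {
      assert(sign_demo(2.5) == 1);
      assert(sign_demo(0.0) == 0);
      assert(sign_demo(-7.0) == -1);
    }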
+#define DEFINE_CAFFE_CPU_UNARY_FUNC(name, operation) \ + template \ + void caffe_cpu_##name(const int n, const Dtype* x, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(x); CHECK(y); \ + for (int i = 0; i < n; ++i) { \ + operation; \ + } \ + } + +#define INSTANTIATE_CAFFE_CPU_UNARY_FUNC(name) \ + template <> \ + void caffe_cpu_##name(const int n, const float* x, float* y); \ + template <> \ + void caffe_cpu_##name(const int n, const double* x, double* y) + + +#define DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(name, operation) \ +template \ +__global__ void name##_kernel(const int n, const Dtype* x, Dtype* y) { \ + CUDA_KERNEL_LOOP(index, n) { \ + operation; \ + } \ +} \ +template <> \ +void caffe_gpu_##name(const int n, const float* x, float* y) { \ + /* NOLINT_NEXT_LINE(whitespace/operators) */ \ + name##_kernel<<>>( \ + n, x, y); \ +} \ +template <> \ +void caffe_gpu_##name(const int n, const double* x, double* y) { \ + /* NOLINT_NEXT_LINE(whitespace/operators) */ \ + name##_kernel<<>>( \ + n, x, y); \ +} + +// output is 1 for the positives, 0 for zero, and -1 for the negatives +DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i])); + +template +void caffe_gpu_sign(const int n, const Dtype* x, Dtype* y); + +// This returns a nonzero value if the input has its sign bit set. +// The name sngbit is meant to avoid conflicts with std::signbit in the macro +using std::signbit; +DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, y[i] = signbit(x[i])); + +template +void caffe_gpu_sgnbit(const int n, const Dtype* x, Dtype* y); + +DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); + +template +void caffe_gpu_fabs(const int n, const Dtype* x, Dtype* y); + +template +void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); + +template +void caffe_gpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); + +} // namespace caffe + + +#endif // CAFFE_UTIL_MATH_FUNCTIONS_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/mkl_alternate.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/mkl_alternate.hpp new file mode 100644 index 000000000..c30eab8d3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/mkl_alternate.hpp @@ -0,0 +1,97 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_UTIL_MKL_ALTERNATE_H_ +#define CAFFE_UTIL_MKL_ALTERNATE_H_ + +#ifdef USE_MKL + +#include + +#else // If use MKL, simply include the MKL header + +extern "C" { +#include +} +#include + +// Functions that caffe uses but are not present if MKL is not linked. + +// A simple way to define the vsl unary functions. The operation should +// be in the form e.g. y[i] = sqrt(a[i]) +#define DEFINE_VSL_UNARY_FUNC(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, float* y) { \ + v##name(n, a, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, double* y) { \ + v##name(n, a, y); \ + } + +DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); +DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); + +// A simple way to define the vsl unary functions with singular parameter b. +// The operation should be in the form e.g. 
y[i] = pow(a[i], b) +#define DEFINE_VSL_UNARY_FUNC_WITH_PARAM(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, const Dtype b, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, const float b, float* y) { \ + v##name(n, a, b, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, const float b, double* y) { \ + v##name(n, a, b, y); \ + } + +DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); + +// A simple way to define the vsl binary functions. The operation should +// be in the form e.g. y[i] = a[i] + b[i] +#define DEFINE_VSL_BINARY_FUNC(name, operation) \ + template \ + void v##name(const int n, const Dtype* a, const Dtype* b, Dtype* y) { \ + CHECK_GT(n, 0); CHECK(a); CHECK(b); CHECK(y); \ + for (int i = 0; i < n; ++i) { operation; } \ + } \ + inline void vs##name( \ + const int n, const float* a, const float* b, float* y) { \ + v##name(n, a, b, y); \ + } \ + inline void vd##name( \ + const int n, const double* a, const double* b, double* y) { \ + v##name(n, a, b, y); \ + } + +DEFINE_VSL_BINARY_FUNC(Add, y[i] = a[i] + b[i]); +DEFINE_VSL_BINARY_FUNC(Sub, y[i] = a[i] - b[i]); +DEFINE_VSL_BINARY_FUNC(Mul, y[i] = a[i] * b[i]); +DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]); + +// In addition, MKL comes with an additional function axpby that is not present +// in standard blas. We will simply use a two-step (inefficient, of course) way +// to mimic that. +inline void cblas_saxpby(const int N, const float alpha, const float* X, + const int incX, const float beta, float* Y, + const int incY) { + cblas_sscal(N, beta, Y, incY); + cblas_saxpy(N, alpha, X, incX, Y, incY); +} +inline void cblas_daxpby(const int N, const double alpha, const double* X, + const int incX, const double beta, double* Y, + const int incY) { + cblas_dscal(N, beta, Y, incY); + cblas_daxpy(N, alpha, X, incX, Y, incY); +} + +#endif // USE_MKL +#endif // CAFFE_UTIL_MKL_ALTERNATE_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/rng.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/rng.hpp new file mode 100644 index 000000000..5909d1715 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/rng.hpp @@ -0,0 +1,19 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_RNG_CPP_HPP_ +#define CAFFE_RNG_CPP_HPP_ + +#include +#include "caffe/common.hpp" + +namespace caffe { + + typedef boost::mt19937 rng_t; + + inline rng_t* caffe_rng() { + return static_cast(Caffe::rng_stream().generator()); + } + +} // namespace caffe + +#endif // CAFFE_RNG_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/util/upgrade_proto.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/util/upgrade_proto.hpp new file mode 100644 index 000000000..a1ac06097 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/util/upgrade_proto.hpp @@ -0,0 +1,49 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_UTIL_UPGRADE_PROTO_H_ +#define CAFFE_UTIL_UPGRADE_PROTO_H_ + +#include + +#include "caffe/proto/caffe.pb.h" +#include "caffe/proto/caffe_pretty_print.pb.h" + +using std::string; + +namespace caffe { + +// Return true iff any layer contains parameters specified using +// deprecated V0LayerParameter. +bool NetNeedsUpgrade(const NetParameter& net_param); + +// Perform all necessary transformations to upgrade a V0NetParameter into a +// NetParameter (including upgrading padding layers and LayerParameters). 
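How the upgrade helpers declared here are typically combined when loading a legacy net definition (a sketch under stated assumptions, not the library's own loading routine; the prototxt path is a placeholder supplied by the caller):

    #include <string>

    #include "caffe/proto/caffe.pb.h"
    #include "caffe/util/io.hpp"
    #include "caffe/util/upgrade_proto.hpp"

    void load_possibly_v0_net(const std::string& prototxt_path) {
      caffe::NetParameter net_param;
      caffe::ReadProtoFromTextFileOrDie(prototxt_path, &net_param);
      if (caffe::NetNeedsUpgrade(net_param)) {
        // Some layer still uses the deprecated V0LayerParameter format:
        // rewrite padding layers and layer fields into the new LayerParameter.
        caffe::NetParameter v0_param(net_param);
        caffe::UpgradeV0Net(v0_param, &net_param);
      }
    }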
+bool UpgradeV0Net(const NetParameter& v0_net_param, NetParameter* net_param); + +// Upgrade NetParameter with padding layers to pad-aware conv layers. +// For any padding layer, remove it and put its pad parameter in any layers +// taking its top blob as input. +// Error if any of these above layers are not-conv layers. +void UpgradeV0PaddingLayers(const NetParameter& param, + NetParameter* param_upgraded_pad); + +// Upgrade a single V0LayerConnection to the new LayerParameter format. +bool UpgradeLayerParameter(const LayerParameter& v0_layer_connection, + LayerParameter* layer_param); + +LayerParameter_LayerType UpgradeV0LayerType(const string& type); + +// Convert a NetParameter to NetParameterPrettyPrint used for dumping to +// proto text files. +void NetParameterToPrettyPrint(const NetParameter& param, + NetParameterPrettyPrint* pretty_param); + +// Read parameters from a file into a NetParameter proto message. +void ReadNetParamsFromTextFileOrDie(const string& param_file, + NetParameter* param); +void ReadNetParamsFromBinaryFileOrDie(const string& param_file, + NetParameter* param); + +} // namespace caffe + +#endif // CAFFE_UTIL_UPGRADE_PROTO_H_ diff --git a/modules/dnns_easily_fooled/caffe/include/caffe/vision_layers.hpp b/modules/dnns_easily_fooled/caffe/include/caffe/vision_layers.hpp new file mode 100644 index 000000000..fc3dbbe19 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/include/caffe/vision_layers.hpp @@ -0,0 +1,479 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_VISION_LAYERS_HPP_ +#define CAFFE_VISION_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/* ArgmaxLayer + Compute the index of the max value across all (channels x height x width). + [In the future, can take specific dimension.] + Intended for use after a classification layer to produce prediction. + If parameter out_max_val is set to true, then output is a vector of pairs + (max_ind, max_val) for each image. + + NOTE: does not implement Backwards operation. +*/ +template +class ArgMaxLayer : public Layer { + public: + explicit ArgMaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_ARGMAX; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + NOT_IMPLEMENTED; + } + bool out_max_val_; +}; + +/* ConcatLayer + Takes at least two blobs and concatenates them along either num or + channel dim, outputting the result. 
+*/ +template +class ConcatLayer : public Layer { + public: + explicit ConcatLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_CONCAT; + } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + Blob col_bob_; + int count_; + int num_; + int channels_; + int height_; + int width_; + int concat_dim_; +}; + +/* ConvolutionLayer +*/ +template +class ConvolutionLayer : public Layer { + public: + explicit ConvolutionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_CONVOLUTION; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int kernel_size_; + int stride_; + int num_; + int channels_; + int pad_; + int height_; + int width_; + int num_output_; + int group_; + Blob col_buffer_; + shared_ptr bias_multiplier_; + bool bias_term_; + int M_; + int K_; + int N_; +}; + +/* EltwiseLayer + Compute elementwise operations like product or sum. 
+*/ +template +class EltwiseLayer : public Layer { + public: + explicit EltwiseLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_ELTWISE; + } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + EltwiseParameter_EltwiseOp op_; + vector coeffs_; +}; + +/* FlattenLayer +*/ +template +class FlattenLayer : public Layer { + public: + explicit FlattenLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_FLATTEN; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int count_; +}; + +/* Im2colLayer +*/ +template +class Im2colLayer : public Layer { + public: + explicit Im2colLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_IM2COL; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int kernel_size_; + int stride_; + int channels_; + int height_; + int width_; + int pad_; +}; + +/* InnerProductLayer +*/ +template +class InnerProductLayer : public Layer { + public: + explicit InnerProductLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_INNER_PRODUCT; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int M_; + int K_; + int N_; + bool bias_term_; + shared_ptr bias_multiplier_; +}; + +// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. 
+template class PoolingLayer; +template class SplitLayer; + +/* LRNLayer + Local Response Normalization +*/ +template +class LRNLayer : public Layer { + public: + explicit LRNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_LRN; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + virtual Dtype CrossChannelForward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype CrossChannelForward_gpu(const vector*>& bottom, + vector*>* top); + virtual Dtype WithinChannelForward(const vector*>& bottom, + vector*>* top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void WithinChannelBackward(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + int num_; + int channels_; + int height_; + int width_; + + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + Blob scale_; + + // Fields used for normalization WITHIN_CHANNEL + shared_ptr > split_layer_; + vector*> split_top_vec_; + shared_ptr > square_layer_; + Blob square_input_; + Blob square_output_; + vector*> square_bottom_vec_; + vector*> square_top_vec_; + shared_ptr > pool_layer_; + Blob pool_output_; + vector*> pool_top_vec_; + shared_ptr > power_layer_; + Blob power_output_; + vector*> power_top_vec_; + shared_ptr > product_layer_; + Blob product_data_input_; + vector*> product_bottom_vec_; +}; + +/* PoolingLayer +*/ +template +class PoolingLayer : public Layer { + public: + explicit PoolingLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_POOLING; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return max_top_blobs_; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int max_top_blobs_; + int kernel_size_; + int stride_; + int pad_; + int channels_; + int height_; + int width_; + int pooled_height_; + int pooled_width_; + Blob rand_idx_; + shared_ptr > max_idx_; +}; + +/* SoftmaxLayer +*/ +template +class SoftmaxLayer : public Layer { + public: + explicit SoftmaxLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_SOFTMAX; + } + 
virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + // sum_multiplier is just used to carry out sum using blas + Blob sum_multiplier_; + // scale is an intermediate blob to hold temporary results. + Blob scale_; +}; + +/* SoftmaxWithLossLayer + Implements softmax and computes the loss. + + It is preferred over separate softmax + multinomiallogisticloss + layers due to more numerically stable gradients. + + In test, this layer could be replaced by simple softmax layer. +*/ +template +class SoftmaxWithLossLayer : public Layer { + public: + explicit SoftmaxWithLossLayer(const LayerParameter& param) + : Layer(param), softmax_layer_(new SoftmaxLayer(param)) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_SOFTMAX_LOSS; + } + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + shared_ptr > softmax_layer_; + // prob stores the output probability of the layer. + Blob prob_; + // Vector holders to call the underlying softmax layer forward and backward. + vector*> softmax_bottom_vec_; + vector*> softmax_top_vec_; +}; + +/* SplitLayer +*/ +template +class SplitLayer : public Layer { + public: + explicit SplitLayer(const LayerParameter& param) + : Layer(param) {} + virtual void SetUp(const vector*>& bottom, + vector*>* top); + + virtual inline LayerParameter_LayerType type() const { + return LayerParameter_LayerType_SPLIT; + } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual Dtype Forward_cpu(const vector*>& bottom, + vector*>* top); + virtual Dtype Forward_gpu(const vector*>& bottom, + vector*>* top); + virtual void Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + virtual void Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom); + + int count_; +}; + +} // namespace caffe + +#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/ilsvrc_2012_mean.mat b/modules/dnns_easily_fooled/caffe/matlab/caffe/ilsvrc_2012_mean.mat new file mode 100644 index 000000000..f1da25c84 Binary files /dev/null and b/modules/dnns_easily_fooled/caffe/matlab/caffe/ilsvrc_2012_mean.mat differ diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe.cpp b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe.cpp new file mode 100644 index 000000000..21f51e839 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe.cpp @@ -0,0 +1,370 @@ +// Copyright 2014 BVLC and contributors. 
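The numerical-stability claim for SoftmaxWithLossLayer above comes down to the combined gradient: with p = softmax(z) and loss = -log(p_label), the derivative with respect to z_i is p_i minus 1 if i equals the label (0 otherwise), so no intermediate 1/p term is ever formed. A small standalone illustration of that direct computation (not part of the original sources):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Gradient of -log(softmax(z)[label]) with respect to z, computed directly.
    std::vector<double> softmax_loss_grad(const std::vector<double>& z, int label) {
      double max_z = *std::max_element(z.begin(), z.end());  // shift for stability
      std::vector<double> p(z.size());
      double sum = 0.0;
      for (size_t i = 0; i < z.size(); ++i) {
        p[i] = std::exp(z[i] - max_z);
        sum += p[i];
      }
      std::vector<double> grad(z.size());
      for (size_t i = 0; i < z.size(); ++i) {
        grad[i] = p[i] / sum - (static_cast<int>(i) == label ? 1.0 : 0.0);
      }
      return grad;
    }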
+// +// matcaffe.cpp provides a wrapper of the caffe::Net class as well as some +// caffe::Caffe functions so that one could easily call it from matlab. +// Note that for matlab, we will simply use float as the data type. + +#include +#include + +#include "mex.h" +#include "caffe/caffe.hpp" + +#define MEX_ARGS int nlhs, mxArray **plhs, int nrhs, const mxArray **prhs + +using namespace caffe; // NOLINT(build/namespaces) + +// The pointer to the internal caffe::Net instance +static shared_ptr > net_; +static int init_key = -2; + +// Five things to be aware of: +// caffe uses row-major order +// matlab uses column-major order +// caffe uses BGR color channel order +// matlab uses RGB color channel order +// images need to have the data mean subtracted +// +// Data coming in from matlab needs to be in the order +// [width, height, channels, images] +// where width is the fastest dimension. +// Here is the rough matlab for putting image data into the correct +// format: +// % convert from uint8 to single +// im = single(im); +// % reshape to a fixed size (e.g., 227x227) +// im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); +// % permute from RGB to BGR and subtract the data mean (already in BGR) +// im = im(:,:,[3 2 1]) - data_mean; +// % flip width and height to make width the fastest dimension +// im = permute(im, [2 1 3]); +// +// If you have multiple images, cat them with cat(4, ...) +// +// The actual forward function. It takes in a cell array of 4-D arrays as +// input and outputs a cell array. + +static mxArray* do_forward(const mxArray* const bottom) { + vector*>& input_blobs = net_->input_blobs(); + CHECK_EQ(static_cast(mxGetDimensions(bottom)[0]), + input_blobs.size()); + for (unsigned int i = 0; i < input_blobs.size(); ++i) { + const mxArray* const elem = mxGetCell(bottom, i); + const float* const data_ptr = + reinterpret_cast(mxGetPr(elem)); + switch (Caffe::mode()) { + case Caffe::CPU: + memcpy(input_blobs[i]->mutable_cpu_data(), data_ptr, + sizeof(float) * input_blobs[i]->count()); + break; + case Caffe::GPU: + cudaMemcpy(input_blobs[i]->mutable_gpu_data(), data_ptr, + sizeof(float) * input_blobs[i]->count(), cudaMemcpyHostToDevice); + break; + default: + LOG(FATAL) << "Unknown Caffe mode."; + } // switch (Caffe::mode()) + } + const vector*>& output_blobs = net_->ForwardPrefilled(); + mxArray* mx_out = mxCreateCellMatrix(output_blobs.size(), 1); + for (unsigned int i = 0; i < output_blobs.size(); ++i) { + // internally data is stored as (width, height, channels, num) + // where width is the fastest dimension + mwSize dims[4] = {output_blobs[i]->width(), output_blobs[i]->height(), + output_blobs[i]->channels(), output_blobs[i]->num()}; + mxArray* mx_blob = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL); + mxSetCell(mx_out, i, mx_blob); + float* data_ptr = reinterpret_cast(mxGetPr(mx_blob)); + switch (Caffe::mode()) { + case Caffe::CPU: + memcpy(data_ptr, output_blobs[i]->cpu_data(), + sizeof(float) * output_blobs[i]->count()); + break; + case Caffe::GPU: + cudaMemcpy(data_ptr, output_blobs[i]->gpu_data(), + sizeof(float) * output_blobs[i]->count(), cudaMemcpyDeviceToHost); + break; + default: + LOG(FATAL) << "Unknown Caffe mode."; + } // switch (Caffe::mode()) + } + + return mx_out; +} + +static mxArray* do_backward(const mxArray* const top_diff) { + vector*>& output_blobs = net_->output_blobs(); + vector*>& input_blobs = net_->input_blobs(); + CHECK_EQ(static_cast(mxGetDimensions(top_diff)[0]), + output_blobs.size()); + // First, copy the output diff + for (unsigned int 
i = 0; i < output_blobs.size(); ++i) { + const mxArray* const elem = mxGetCell(top_diff, i); + const float* const data_ptr = + reinterpret_cast(mxGetPr(elem)); + switch (Caffe::mode()) { + case Caffe::CPU: + memcpy(output_blobs[i]->mutable_cpu_diff(), data_ptr, + sizeof(float) * output_blobs[i]->count()); + break; + case Caffe::GPU: + cudaMemcpy(output_blobs[i]->mutable_gpu_diff(), data_ptr, + sizeof(float) * output_blobs[i]->count(), cudaMemcpyHostToDevice); + break; + default: + LOG(FATAL) << "Unknown Caffe mode."; + } // switch (Caffe::mode()) + } + // LOG(INFO) << "Start"; + net_->Backward(); + // LOG(INFO) << "End"; + mxArray* mx_out = mxCreateCellMatrix(input_blobs.size(), 1); + for (unsigned int i = 0; i < input_blobs.size(); ++i) { + // internally data is stored as (width, height, channels, num) + // where width is the fastest dimension + mwSize dims[4] = {input_blobs[i]->width(), input_blobs[i]->height(), + input_blobs[i]->channels(), input_blobs[i]->num()}; + mxArray* mx_blob = mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL); + mxSetCell(mx_out, i, mx_blob); + float* data_ptr = reinterpret_cast(mxGetPr(mx_blob)); + switch (Caffe::mode()) { + case Caffe::CPU: + memcpy(data_ptr, input_blobs[i]->cpu_diff(), + sizeof(float) * input_blobs[i]->count()); + break; + case Caffe::GPU: + cudaMemcpy(data_ptr, input_blobs[i]->gpu_diff(), + sizeof(float) * input_blobs[i]->count(), cudaMemcpyDeviceToHost); + break; + default: + LOG(FATAL) << "Unknown Caffe mode."; + } // switch (Caffe::mode()) + } + + return mx_out; +} + +static mxArray* do_get_weights() { + const vector > >& layers = net_->layers(); + const vector& layer_names = net_->layer_names(); + + // Step 1: count the number of layers with weights + int num_layers = 0; + { + string prev_layer_name = ""; + for (unsigned int i = 0; i < layers.size(); ++i) { + vector > >& layer_blobs = layers[i]->blobs(); + if (layer_blobs.size() == 0) { + continue; + } + if (layer_names[i] != prev_layer_name) { + prev_layer_name = layer_names[i]; + num_layers++; + } + } + } + + // Step 2: prepare output array of structures + mxArray* mx_layers; + { + const mwSize dims[2] = {num_layers, 1}; + const char* fnames[2] = {"weights", "layer_names"}; + mx_layers = mxCreateStructArray(2, dims, 2, fnames); + } + + // Step 3: copy weights into output + { + string prev_layer_name = ""; + int mx_layer_index = 0; + for (unsigned int i = 0; i < layers.size(); ++i) { + vector > >& layer_blobs = layers[i]->blobs(); + if (layer_blobs.size() == 0) { + continue; + } + + mxArray* mx_layer_cells = NULL; + if (layer_names[i] != prev_layer_name) { + prev_layer_name = layer_names[i]; + const mwSize dims[2] = {layer_blobs.size(), 1}; + mx_layer_cells = mxCreateCellArray(2, dims); + mxSetField(mx_layers, mx_layer_index, "weights", mx_layer_cells); + mxSetField(mx_layers, mx_layer_index, "layer_names", + mxCreateString(layer_names[i].c_str())); + mx_layer_index++; + } + + for (unsigned int j = 0; j < layer_blobs.size(); ++j) { + // internally data is stored as (width, height, channels, num) + // where width is the fastest dimension + mwSize dims[4] = {layer_blobs[j]->width(), layer_blobs[j]->height(), + layer_blobs[j]->channels(), layer_blobs[j]->num()}; + + mxArray* mx_weights = + mxCreateNumericArray(4, dims, mxSINGLE_CLASS, mxREAL); + mxSetCell(mx_layer_cells, j, mx_weights); + float* weights_ptr = reinterpret_cast(mxGetPr(mx_weights)); + + switch (Caffe::mode()) { + case Caffe::CPU: + memcpy(weights_ptr, layer_blobs[j]->cpu_data(), + sizeof(float) * 
layer_blobs[j]->count()); + break; + case Caffe::GPU: + CUDA_CHECK(cudaMemcpy(weights_ptr, layer_blobs[j]->gpu_data(), + sizeof(float) * layer_blobs[j]->count(), cudaMemcpyDeviceToHost)); + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } + } + } + } + + return mx_layers; +} + +static void get_weights(MEX_ARGS) { + plhs[0] = do_get_weights(); +} + +static void set_mode_cpu(MEX_ARGS) { + Caffe::set_mode(Caffe::CPU); +} + +static void set_mode_gpu(MEX_ARGS) { + Caffe::set_mode(Caffe::GPU); +} + +static void set_phase_train(MEX_ARGS) { + Caffe::set_phase(Caffe::TRAIN); +} + +static void set_phase_test(MEX_ARGS) { + Caffe::set_phase(Caffe::TEST); +} + +static void set_device(MEX_ARGS) { + if (nrhs != 1) { + LOG(ERROR) << "Only given " << nrhs << " arguments"; + mexErrMsgTxt("Wrong number of arguments"); + } + + int device_id = static_cast(mxGetScalar(prhs[0])); + Caffe::SetDevice(device_id); +} + +static void get_init_key(MEX_ARGS) { + plhs[0] = mxCreateDoubleScalar(init_key); +} + +static void init(MEX_ARGS) { + if (nrhs != 2) { + LOG(ERROR) << "Only given " << nrhs << " arguments"; + mexErrMsgTxt("Wrong number of arguments"); + } + + char* param_file = mxArrayToString(prhs[0]); + char* model_file = mxArrayToString(prhs[1]); + + net_.reset(new Net(string(param_file))); + net_->CopyTrainedLayersFrom(string(model_file)); + + mxFree(param_file); + mxFree(model_file); + + init_key = random(); // NOLINT(caffe/random_fn) + + if (nlhs == 1) { + plhs[0] = mxCreateDoubleScalar(init_key); + } +} + +static void reset(MEX_ARGS) { + if (net_) { + net_.reset(); + init_key = -2; + LOG(INFO) << "Network reset, call init before use it again"; + } +} + +static void forward(MEX_ARGS) { + if (nrhs != 1) { + LOG(ERROR) << "Only given " << nrhs << " arguments"; + mexErrMsgTxt("Wrong number of arguments"); + } + + plhs[0] = do_forward(prhs[0]); +} + +static void backward(MEX_ARGS) { + if (nrhs != 1) { + LOG(ERROR) << "Only given " << nrhs << " arguments"; + mexErrMsgTxt("Wrong number of arguments"); + } + + plhs[0] = do_backward(prhs[0]); +} + +static void is_initialized(MEX_ARGS) { + if (!net_) { + plhs[0] = mxCreateDoubleScalar(0); + } else { + plhs[0] = mxCreateDoubleScalar(1); + } +} + +/** ----------------------------------------------------------------- + ** Available commands. + **/ +struct handler_registry { + string cmd; + void (*func)(MEX_ARGS); +}; + +static handler_registry handlers[] = { + // Public API functions + { "forward", forward }, + { "backward", backward }, + { "init", init }, + { "is_initialized", is_initialized }, + { "set_mode_cpu", set_mode_cpu }, + { "set_mode_gpu", set_mode_gpu }, + { "set_phase_train", set_phase_train }, + { "set_phase_test", set_phase_test }, + { "set_device", set_device }, + { "get_weights", get_weights }, + { "get_init_key", get_init_key }, + { "reset", reset }, + // The end. + { "END", NULL }, +}; + + +/** ----------------------------------------------------------------- + ** matlab entry point: caffe(api_command, arg1, arg2, ...) 
+ **/ +void mexFunction(MEX_ARGS) { + if (nrhs == 0) { + LOG(ERROR) << "No API command given"; + mexErrMsgTxt("An API command is requires"); + return; + } + + { // Handle input command + char *cmd = mxArrayToString(prhs[0]); + bool dispatched = false; + // Dispatch to cmd handler + for (int i = 0; handlers[i].func != NULL; i++) { + if (handlers[i].cmd.compare(cmd) == 0) { + handlers[i].func(nlhs, plhs, nrhs-1, prhs+1); + dispatched = true; + break; + } + } + if (!dispatched) { + LOG(ERROR) << "Unknown command `" << cmd << "'"; + mexErrMsgTxt("API command not recognized"); + } + mxFree(cmd); + } +} diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_batch.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_batch.m new file mode 100644 index 000000000..3cb7f1445 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_batch.m @@ -0,0 +1,76 @@ +function [scores,list_im] = matcaffe_batch(list_im, use_gpu) +% scores = matcaffe_batch(list_im, use_gpu) +% +% Demo of the matlab wrapper using the ILSVRC network. +% +% input +% list_im list of images files +% use_gpu 1 to use the GPU, 0 to use the CPU +% +% output +% scores 1000 x num_images ILSVRC output vector +% +% You may need to do the following before you start matlab: +% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda/lib64 +% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6 +% Or the equivalent based on where things are installed on your system +% +% Usage: +% scores = matcaffe_batch({'peppers.png','onion.png'}); +% scores = matcaffe_batch('list_images.txt', 1); +if nargin < 1 + % For test purposes + list_im = {'peppers.png','onions.png'}; +end +if ischar(list_im) + %Assume it is a file contaning the list of images + filename = list_im; + list_im = read_cell(filename); +end +% Adjust the batch size to match with imagenet_deploy.prototxt +batch_size = 10; +% Adjust dim to the output size of imagenet_deploy.prototxt +dim = 1000; +disp(list_im) +if mod(length(list_im),batch_size) + warning(['Assuming batches of ' num2str(batch_size) ' images rest will be filled with zeros']) +end + +% init caffe network (spews logging info) +if exist('use_gpu', 'var') + matcaffe_init(use_gpu); +else + matcaffe_init(); +end + +d = load('ilsvrc_2012_mean'); +IMAGE_MEAN = d.image_mean; + +% prepare input + +num_images = length(list_im); +scores = zeros(dim,num_images,'single'); +num_batches = ceil(length(list_im)/batch_size) +initic=tic; +for bb = 1 : num_batches + batchtic = tic; + range = 1+batch_size*(bb-1):min(num_images,batch_size * bb); + tic + input_data = prepare_batch(list_im(range),IMAGE_MEAN,batch_size); + toc, tic + fprintf('Batch %d out of %d %.2f%% Complete ETA %.2f seconds\n',... + bb,num_batches,bb/num_batches*100,toc(initic)/bb*(num_batches-bb)); + output_data = caffe('forward', {input_data}); + toc + output_data = squeeze(output_data{1}); + scores(:,range) = output_data(:,mod(range-1,batch_size)+1); + toc(batchtic) +end +toc(initic); + +if exist('filename', 'var') + save([filename '.probs.mat'],'list_im','scores','-v7.3'); +end + + + diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_demo.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_demo.m new file mode 100644 index 000000000..a931f910c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_demo.m @@ -0,0 +1,110 @@ +function [scores, maxlabel] = matcaffe_demo(im, use_gpu) +% scores = matcaffe_demo(im, use_gpu) +% +% Demo of the matlab wrapper using the ILSVRC network. 
+% +% input +% im color image as uint8 HxWx3 +% use_gpu 1 to use the GPU, 0 to use the CPU +% +% output +% scores 1000-dimensional ILSVRC score vector +% +% You may need to do the following before you start matlab: +% $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64 +% $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6 +% Or the equivalent based on where things are installed on your system +% +% Usage: +% im = imread('../../examples/images/cat.jpg'); +% scores = matcaffe_demo(im, 1); +% [score, class] = max(scores); +% Five things to be aware of: +% caffe uses row-major order +% matlab uses column-major order +% caffe uses BGR color channel order +% matlab uses RGB color channel order +% images need to have the data mean subtracted + +% Data coming in from matlab needs to be in the order +% [width, height, channels, images] +% where width is the fastest dimension. +% Here is the rough matlab for putting image data into the correct +% format: +% % convert from uint8 to single +% im = single(im); +% % reshape to a fixed size (e.g., 227x227) +% im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); +% % permute from RGB to BGR and subtract the data mean (already in BGR) +% im = im(:,:,[3 2 1]) - data_mean; +% % flip width and height to make width the fastest dimension +% im = permute(im, [2 1 3]); + +% If you have multiple images, cat them with cat(4, ...) + +% The actual forward function. It takes in a cell array of 4-D arrays as +% input and outputs a cell array. + + +% init caffe network (spews logging info) +if exist('use_gpu', 'var') + matcaffe_init(use_gpu); +else + matcaffe_init(); +end + +if nargin < 1 + % For demo purposes we will use the peppers image + im = imread('peppers.png'); +end + +% prepare oversampled input +% input_data is Height x Width x Channel x Num +tic; +input_data = {prepare_image(im)}; +toc; + +% do forward pass to get scores +% scores are now Width x Height x Channels x Num +tic; +scores = caffe('forward', input_data); +toc; + +scores = scores{1}; +size(scores) +scores = squeeze(scores); +scores = mean(scores,2); + +[~,maxlabel] = max(scores); + +% ------------------------------------------------------------------------ +function images = prepare_image(im) +% ------------------------------------------------------------------------ +d = load('ilsvrc_2012_mean'); +IMAGE_MEAN = d.image_mean; +IMAGE_DIM = 256; +CROPPED_DIM = 227; + +% resize to fixed input size +im = single(im); +im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); +% permute from RGB to BGR (IMAGE_MEAN is already BGR) +im = im(:,:,[3 2 1]) - IMAGE_MEAN; + +% oversample (4 corners, center, and their x-axis flips) +images = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single'); +indices = [0 IMAGE_DIM-CROPPED_DIM] + 1; +curr = 1; +for i = indices + for j = indices + images(:, :, :, curr) = ... + permute(im(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :), [2 1 3]); + images(:, :, :, curr+5) = images(end:-1:1, :, :, curr); + curr = curr + 1; + end +end +center = floor(indices(2) / 2)+1; +images(:,:,:,5) = ... + permute(im(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:), ... 
+ [2 1 3]); +images(:,:,:,10) = images(end:-1:1, :, :, curr); diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_init.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_init.m new file mode 100644 index 000000000..4e4ef8bff --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/matcaffe_init.m @@ -0,0 +1,44 @@ +function matcaffe_init(use_gpu, model_def_file, model_file) +% matcaffe_init(model_def_file, model_file, use_gpu) +% Initilize matcaffe wrapper + +if nargin < 1 + % By default use CPU + use_gpu = 0; +end +if nargin < 2 || isempty(model_def_file) + % By default use imagenet_deploy + model_def_file = '../../examples/imagenet/imagenet_deploy.prototxt'; +end +if nargin < 3 || isempty(model_file) + % By default use caffe reference model + model_file = '../../examples/imagenet/caffe_reference_imagenet_model'; +end + + +if caffe('is_initialized') == 0 + if exist(model_file, 'file') == 0 + % NOTE: you'll have to get the pre-trained ILSVRC network + error('You need a network model file'); + end + if ~exist(model_def_file,'file') + % NOTE: you'll have to get network definition + error('You need the network prototxt definition'); + end + caffe('init', model_def_file, model_file) +end +fprintf('Done with init\n'); + +% set to use GPU or CPU +if use_gpu + fprintf('Using GPU Mode\n'); + caffe('set_mode_gpu'); +else + fprintf('Using CPU Mode\n'); + caffe('set_mode_cpu'); +end +fprintf('Done with set_mode\n'); + +% put into test mode +caffe('set_phase_test'); +fprintf('Done with set_phase_test\n'); diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/prepare_batch.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/prepare_batch.m new file mode 100644 index 000000000..345c8eb5f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/prepare_batch.m @@ -0,0 +1,41 @@ +% ------------------------------------------------------------------------ +function images = prepare_batch(image_files,IMAGE_MEAN,batch_size) +% ------------------------------------------------------------------------ +if nargin < 2 + d = load('ilsvrc_2012_mean'); + IMAGE_MEAN = d.image_mean; +end +num_images = length(image_files); +if nargin < 3 + batch_size = num_images; +end + +IMAGE_DIM = 256; +CROPPED_DIM = 227; +indices = [0 IMAGE_DIM-CROPPED_DIM] + 1; +center = floor(indices(2) / 2)+1; + +num_images = length(image_files); +images = zeros(CROPPED_DIM,CROPPED_DIM,3,batch_size,'single'); + +parfor i=1:num_images + % read file + fprintf('%c Preparing %s\n',13,image_files{i}); + try + im = imread(image_files{i}); + % resize to fixed input size + im = single(im); + im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); + % Transform GRAY to RGB + if size(im,3) == 1 + im = cat(3,im,im,im); + end + % permute from RGB to BGR (IMAGE_MEAN is already BGR) + im = im(:,:,[3 2 1]) - IMAGE_MEAN; + % Crop the center of the image + images(:,:,:,i) = permute(im(center:center+CROPPED_DIM-1,... 
+ center:center+CROPPED_DIM-1,:),[2 1 3]); + catch + warning('Problems with file',image_files{i}); + end +end \ No newline at end of file diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/print_cell.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/print_cell.m new file mode 100644 index 000000000..864340d4b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/print_cell.m @@ -0,0 +1,42 @@ +function res=print_cell(input,file,linesep,cellsep) +assert(iscell(input),'The input should be a cell') +if nargin < 4 + cellsep = '\t'; +end +if nargin < 3 + linesep = '\n'; +end +if exist('file','var') && ~isempty(file) + %% + fid = fopen(file,'w'); + for l=1:length(input) + if iscell(input{l}) + for i=1:length(input{l}) + fprintf(fid,['%s' cellsep],input{l}{i}); + end + fprintf(fid,linesep); + else + if size(input,2) > 1 + for i=1:size(input,2) + fprintf(fid,'%s ',input{l,i}); + end + fprintf(fid,linesep); + else + fprintf(fid,['%s' linesep],input{l}); + end + end + end + fclose(fid); +else + res = ''; + for l=1:length(input) + if iscell(input{l}) + for i=1:length(input{l}) + res = [res sprintf([cellsep{1} '%s' cellsep{2}],input{l}{i})]; + end + res = [res sprintf(linesep)]; + else + res = [res sprintf(['%s' linesep],input{l}(:))]; + end + end +end \ No newline at end of file diff --git a/modules/dnns_easily_fooled/caffe/matlab/caffe/read_cell.m b/modules/dnns_easily_fooled/caffe/matlab/caffe/read_cell.m new file mode 100644 index 000000000..198311671 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/matlab/caffe/read_cell.m @@ -0,0 +1,21 @@ +function res=read_cell(filename,linesep,cellsep) +if nargin < 2, linesep='\n'; end +if nargin < 3, cellsep = '\t'; end +if exist(filename,'file') + fid = fopen(filename); +else + % Assume that filename is either a file ide or a string + fid = filename; +end + +fileLines = textscan(fid,'%s','delimiter',linesep,'BufSize',100000); + +fileLines = fileLines{1}; + +if regexp(fileLines{1},cellsep,'once') + fileLines = regexprep(fileLines,['^' cellsep '|' cellsep '$'],''); + res = regexp(fileLines,cellsep,'split'); + res = cell2matcell(res); +else + res = fileLines; +end diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/__init__.py b/modules/dnns_easily_fooled/caffe/python/caffe/__init__.py new file mode 100644 index 000000000..430bfce2b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/__init__.py @@ -0,0 +1,4 @@ +from .pycaffe import Net, SGDSolver +from .classifier import Classifier +from .detector import Detector +import io diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/_caffe.cpp b/modules/dnns_easily_fooled/caffe/python/caffe/_caffe.cpp new file mode 100644 index 000000000..e9fe5cd3b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/_caffe.cpp @@ -0,0 +1,357 @@ +// Copyright 2014 BVLC and contributors. +// pycaffe provides a wrapper of the caffe::Net class as well as some +// caffe::Caffe functions so that one could easily call it from Python. +// Note that for Python, we will simply use float as the data type. + +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + +#include "boost/python.hpp" +#include "boost/python/suite/indexing/vector_indexing_suite.hpp" +#include "numpy/arrayobject.h" + +// these need to be included after boost on OS X +#include // NOLINT(build/include_order) +#include // NOLINT(build/include_order) +#include // NOLINT + +#include "caffe/caffe.hpp" + +// Temporary solution for numpy < 1.7 versions: old macro, no promises. 
+// You're strongly advised to upgrade to >= 1.7. +#ifndef NPY_ARRAY_C_CONTIGUOUS +#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS +#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x)) +#endif + + +using namespace caffe; // NOLINT(build/namespaces) +using boost::python::extract; +using boost::python::len; +using boost::python::list; +using boost::python::object; +using boost::python::handle; +using boost::python::vector_indexing_suite; + +// for convenience, check that input files can be opened, and raise an +// exception that boost will send to Python if not (caffe could still crash +// later if the input files are disturbed before they are actually used, but +// this saves frustration in most cases) +static void CheckFile(const string& filename) { + std::ifstream f(filename.c_str()); + if (!f.good()) { + f.close(); + throw std::runtime_error("Could not open file " + filename); + } + f.close(); +} + +// wrap shared_ptr > in a class that we construct in C++ and pass +// to Python +class CaffeBlob { + public: + CaffeBlob(const shared_ptr > &blob, const string& name) + : blob_(blob), name_(name) {} + + string name() const { return name_; } + int num() const { return blob_->num(); } + int channels() const { return blob_->channels(); } + int height() const { return blob_->height(); } + int width() const { return blob_->width(); } + int count() const { return blob_->count(); } + + // this is here only to satisfy boost's vector_indexing_suite + bool operator == (const CaffeBlob &other) { + return this->blob_ == other.blob_; + } + + protected: + shared_ptr > blob_; + string name_; +}; + + +// We need another wrapper (used as boost::python's HeldType) that receives a +// self PyObject * which we can use as ndarray.base, so that data/diff memory +// is not freed while still being used in Python. +class CaffeBlobWrap : public CaffeBlob { + public: + CaffeBlobWrap(PyObject *p, const CaffeBlob &blob) + : CaffeBlob(blob), self_(p) {} + + object get_data() { + npy_intp dims[] = {num(), channels(), height(), width()}; + + PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32, + blob_->mutable_cpu_data()); + PyArray_SetBaseObject(reinterpret_cast(obj), self_); + Py_INCREF(self_); + handle<> h(obj); + + return object(h); + } + + object get_diff() { + npy_intp dims[] = {num(), channels(), height(), width()}; + + PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32, + blob_->mutable_cpu_diff()); + PyArray_SetBaseObject(reinterpret_cast(obj), self_); + Py_INCREF(self_); + handle<> h(obj); + + return object(h); + } + + private: + PyObject *self_; +}; + + +class CaffeLayer { + public: + CaffeLayer(const shared_ptr > &layer, const string &name) + : layer_(layer), name_(name) {} + + string name() const { return name_; } + vector blobs() { + vector result; + for (int i = 0; i < layer_->blobs().size(); ++i) { + result.push_back(CaffeBlob(layer_->blobs()[i], name_)); + } + return result; + } + + // this is here only to satisfy boost's vector_indexing_suite + bool operator == (const CaffeLayer &other) { + return this->layer_ == other.layer_; + } + + protected: + shared_ptr > layer_; + string name_; +}; + + +// A simple wrapper over CaffeNet that runs the forward process. +struct CaffeNet { + // For cases where parameters will be determined later by the Python user, + // create a Net with unallocated parameters (which will not be zero-filled + // when accessed). 
+ explicit CaffeNet(string param_file) { + Init(param_file); + } + + CaffeNet(string param_file, string pretrained_param_file) { + Init(param_file); + CheckFile(pretrained_param_file); + net_->CopyTrainedLayersFrom(pretrained_param_file); + } + + explicit CaffeNet(shared_ptr > net) + : net_(net) {} + + void Init(string param_file) { + CheckFile(param_file); + net_.reset(new Net(param_file)); + } + + + virtual ~CaffeNet() {} + + // Generate Python exceptions for badly shaped or discontiguous arrays. + inline void check_contiguous_array(PyArrayObject* arr, string name, + int channels, int height, int width) { + if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) { + throw std::runtime_error(name + " must be C contiguous"); + } + if (PyArray_NDIM(arr) != 4) { + throw std::runtime_error(name + " must be 4-d"); + } + if (PyArray_TYPE(arr) != NPY_FLOAT32) { + throw std::runtime_error(name + " must be float32"); + } + if (PyArray_DIMS(arr)[1] != channels) { + throw std::runtime_error(name + " has wrong number of channels"); + } + if (PyArray_DIMS(arr)[2] != height) { + throw std::runtime_error(name + " has wrong height"); + } + if (PyArray_DIMS(arr)[3] != width) { + throw std::runtime_error(name + " has wrong width"); + } + } + + void Forward() { + net_->ForwardPrefilled(); + } + + void Backward() { + net_->Backward(); + } + + void set_input_arrays(object data_obj, object labels_obj) { + // check that this network has an input MemoryDataLayer + shared_ptr > md_layer = + boost::dynamic_pointer_cast >(net_->layers()[0]); + if (!md_layer) { + throw std::runtime_error("set_input_arrays may only be called if the" + " first layer is a MemoryDataLayer"); + } + + // check that we were passed appropriately-sized contiguous memory + PyArrayObject* data_arr = + reinterpret_cast(data_obj.ptr()); + PyArrayObject* labels_arr = + reinterpret_cast(labels_obj.ptr()); + check_contiguous_array(data_arr, "data array", md_layer->datum_channels(), + md_layer->datum_height(), md_layer->datum_width()); + check_contiguous_array(labels_arr, "labels array", 1, 1, 1); + if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) { + throw std::runtime_error("data and labels must have the same first" + " dimension"); + } + if (PyArray_DIMS(data_arr)[0] % md_layer->batch_size() != 0) { + throw std::runtime_error("first dimensions of input arrays must be a" + " multiple of batch size"); + } + + // hold references + input_data_ = data_obj; + input_labels_ = labels_obj; + + md_layer->Reset(static_cast(PyArray_DATA(data_arr)), + static_cast(PyArray_DATA(labels_arr)), + PyArray_DIMS(data_arr)[0]); + } + + // save the network weights to binary proto for net surgeries. + void save(string filename) { + NetParameter net_param; + net_->ToProto(&net_param, false); + WriteProtoToBinaryFile(net_param, filename.c_str()); + } + + // The caffe::Caffe utility functions. 
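+ // These are thin pass-throughs to the global caffe::Caffe settings; they are
+ // exported on the Net wrapper in the boost_python module below so Python code
+ // can switch mode, phase, and GPU device for the current process.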
+ void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); } + void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); } + void set_phase_train() { Caffe::set_phase(Caffe::TRAIN); } + void set_phase_test() { Caffe::set_phase(Caffe::TEST); } + void set_device(int device_id) { Caffe::SetDevice(device_id); } + + vector blobs() { + vector result; + for (int i = 0; i < net_->blobs().size(); ++i) { + result.push_back(CaffeBlob(net_->blobs()[i], net_->blob_names()[i])); + } + return result; + } + + vector layers() { + vector result; + for (int i = 0; i < net_->layers().size(); ++i) { + result.push_back(CaffeLayer(net_->layers()[i], net_->layer_names()[i])); + } + return result; + } + + list inputs() { + list input_blob_names; + for (int i = 0; i < net_->input_blob_indices().size(); ++i) { + input_blob_names.append( + net_->blob_names()[net_->input_blob_indices()[i]]); + } + return input_blob_names; + } + + list outputs() { + list output_blob_names; + for (int i = 0; i < net_->output_blob_indices().size(); ++i) { + output_blob_names.append( + net_->blob_names()[net_->output_blob_indices()[i]]); + } + return output_blob_names; + } + + // The pointer to the internal caffe::Net instant. + shared_ptr > net_; + // if taking input from an ndarray, we need to hold references + object input_data_; + object input_labels_; +}; + +class CaffeSGDSolver { + public: + explicit CaffeSGDSolver(const string& param_file) { + // as in CaffeNet, (as a convenience, not a guarantee), create a Python + // exception if param_file can't be opened + CheckFile(param_file); + solver_.reset(new SGDSolver(param_file)); + // we need to explicitly store the net wrapper, rather than constructing + // it on the fly, so that it can hold references to Python objects + net_.reset(new CaffeNet(solver_->net())); + } + + shared_ptr net() { return net_; } + void Solve() { return solver_->Solve(); } + void SolveResume(const string& resume_file) { + CheckFile(resume_file); + return solver_->Solve(resume_file); + } + + protected: + shared_ptr net_; + shared_ptr > solver_; +}; + + +// The boost_python module definition. 
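+// Rough Python-side usage of what this module exports (a sketch; user code
+// normally goes through the pycaffe wrapper re-exported in caffe/__init__.py
+// rather than importing _caffe directly, and the file names are placeholders):
+//   from caffe import Net
+//   net = Net('deploy.prototxt', 'weights.caffemodel')
+//   net.set_mode_cpu()
+//   net.set_phase_test()
+//   out = net.forward()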
+BOOST_PYTHON_MODULE(_caffe) { + // below, we prepend an underscore to methods that will be replaced + // in Python + boost::python::class_ >( + "Net", boost::python::init()) + .def(boost::python::init()) + .def("_forward", &CaffeNet::Forward) + .def("_backward", &CaffeNet::Backward) + .def("set_mode_cpu", &CaffeNet::set_mode_cpu) + .def("set_mode_gpu", &CaffeNet::set_mode_gpu) + .def("set_phase_train", &CaffeNet::set_phase_train) + .def("set_phase_test", &CaffeNet::set_phase_test) + .def("set_device", &CaffeNet::set_device) + .add_property("_blobs", &CaffeNet::blobs) + .add_property("layers", &CaffeNet::layers) + .add_property("inputs", &CaffeNet::inputs) + .add_property("outputs", &CaffeNet::outputs) + .def("_set_input_arrays", &CaffeNet::set_input_arrays) + .def("save", &CaffeNet::save); + + boost::python::class_( + "Blob", boost::python::no_init) + .add_property("name", &CaffeBlob::name) + .add_property("num", &CaffeBlob::num) + .add_property("channels", &CaffeBlob::channels) + .add_property("height", &CaffeBlob::height) + .add_property("width", &CaffeBlob::width) + .add_property("count", &CaffeBlob::count) + .add_property("data", &CaffeBlobWrap::get_data) + .add_property("diff", &CaffeBlobWrap::get_diff); + + boost::python::class_( + "Layer", boost::python::no_init) + .add_property("name", &CaffeLayer::name) + .add_property("blobs", &CaffeLayer::blobs); + + boost::python::class_( + "SGDSolver", boost::python::init()) + .add_property("net", &CaffeSGDSolver::net) + .def("solve", &CaffeSGDSolver::Solve) + .def("solve", &CaffeSGDSolver::SolveResume); + + boost::python::class_ >("BlobVec") + .def(vector_indexing_suite, true>()); + + boost::python::class_ >("LayerVec") + .def(vector_indexing_suite, true>()); + + import_array(); +} diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/classifier.py b/modules/dnns_easily_fooled/caffe/python/caffe/classifier.py new file mode 100644 index 000000000..f347be42a --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/classifier.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +""" +Classifier is an image classifier specialization of Net. +""" + +import numpy as np + +import caffe + + +class Classifier(caffe.Net): + """ + Classifier extends Net for image class prediction + by scaling, center cropping, or oversampling. + """ + def __init__(self, model_file, pretrained_file, image_dims=None, + gpu=False, mean_file=None, input_scale=None, channel_swap=None): + """ + Take + image_dims: dimensions to scale input for cropping/sampling. + Default is to scale to net input size for whole-image crop. + gpu, mean_file, input_scale, channel_swap: convenience params for + setting mode, mean, input scale, and channel order. + """ + caffe.Net.__init__(self, model_file, pretrained_file) + self.set_phase_test() + + if gpu: + self.set_mode_gpu() + else: + self.set_mode_cpu() + + if mean_file: + self.set_mean(self.inputs[0], mean_file) + if input_scale: + self.set_input_scale(self.inputs[0], input_scale) + if channel_swap: + self.set_channel_swap(self.inputs[0], channel_swap) + + self.crop_dims = np.array(self.blobs[self.inputs[0]].data.shape[2:]) + if not image_dims: + image_dims = self.crop_dims + self.image_dims = image_dims + + + def predict(self, inputs, oversample=True): + """ + Predict classification probabilities of inputs. + + Take + inputs: iterable of (H x W x K) input ndarrays. + oversample: average predictions across center, corners, and mirrors + when True (default). Center-only prediction when False. 
+ + Give + predictions: (N x C) ndarray of class probabilities + for N images and C classes. + """ + # Scale to standardize input dimensions. + inputs = np.asarray([caffe.io.resize_image(im, self.image_dims) + for im in inputs]) + + if oversample: + # Generate center, corner, and mirrored crops. + inputs = caffe.io.oversample(inputs, self.crop_dims) + else: + # Take center crop. + center = np.array(self.image_dims) / 2.0 + crop = np.tile(center, (1, 2))[0] + np.concatenate([ + -self.crop_dims / 2.0, + self.crop_dims / 2.0 + ]) + inputs = inputs[:, crop[0]:crop[2], crop[1]:crop[3], :] + + # Classify + caffe_in = np.asarray([self.preprocess(self.inputs[0], in_) + for in_ in inputs]) + out = self.forward_all(**{self.inputs[0]: caffe_in}) + predictions = out[self.outputs[0]].squeeze(axis=(2,3)) + + # For oversampling, average predictions across crops. + if oversample: + predictions = predictions.reshape((len(predictions) / 10, 10, -1)) + predictions = predictions.mean(1) + + return predictions diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/detector.py b/modules/dnns_easily_fooled/caffe/python/caffe/detector.py new file mode 100644 index 000000000..56c26aefd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/detector.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +""" +Do windowed detection by classifying a number of images/crops at once, +optionally using the selective search window proposal method. + +This implementation follows ideas in + Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik. + Rich feature hierarchies for accurate object detection and semantic + segmentation. + http://arxiv.org/abs/1311.2524 + +The selective_search_ijcv_with_python code required for the selective search +proposal mode is available at + https://github.com/sergeyk/selective_search_ijcv_with_python +""" +import numpy as np +import os + +import caffe + + +class Detector(caffe.Net): + """ + Detector extends Net for windowed detection by a list of crops or + selective search proposals. + """ + def __init__(self, model_file, pretrained_file, gpu=False, mean_file=None, + input_scale=None, channel_swap=None, context_pad=None): + """ + Take + gpu, mean_file, input_scale, channel_swap: convenience params for + setting mode, mean, input scale, and channel order. + context_pad: amount of surrounding context to take s.t. a `context_pad` + sized border of pixels in the network input image is context, as in + R-CNN feature extraction. + """ + caffe.Net.__init__(self, model_file, pretrained_file) + self.set_phase_test() + + if gpu: + self.set_mode_gpu() + else: + self.set_mode_cpu() + + if mean_file: + self.set_mean(self.inputs[0], mean_file) + if input_scale: + self.set_input_scale(self.inputs[0], input_scale) + if channel_swap: + self.set_channel_swap(self.inputs[0], channel_swap) + + self.configure_crop(context_pad) + + + def detect_windows(self, images_windows): + """ + Do windowed detection over given images and windows. Windows are + extracted then warped to the input dimensions of the net. + + Take + images_windows: (image filename, window list) iterable. + context_crop: size of context border to crop in pixels. + + Give + detections: list of {filename: image filename, window: crop coordinates, + predictions: prediction vector} dicts. + """ + # Extract windows. 
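+ # Each (filename, window list) pair contributes one crop per window; crops
+ # keep their own size here and are warped to the net input dimensions by
+ # self.preprocess() below.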
+ window_inputs = [] + for image_fname, windows in images_windows: + image = caffe.io.load_image(image_fname).astype(np.float32) + for window in windows: + window_inputs.append(self.crop(image, window)) + + # Run through the net (warping windows to input dimensions). + caffe_in = np.asarray([self.preprocess(self.inputs[0], window_in) + for window_in in window_inputs]) + out = self.forward_all(**{self.inputs[0]: caffe_in}) + predictions = out[self.outputs[0]].squeeze(axis=(2,3)) + + # Package predictions with images and windows. + detections = [] + ix = 0 + for image_fname, windows in images_windows: + for window in windows: + detections.append({ + 'window': window, + 'prediction': predictions[ix], + 'filename': image_fname + }) + ix += 1 + return detections + + + def detect_selective_search(self, image_fnames): + """ + Do windowed detection over Selective Search proposals by extracting + the crop and warping to the input dimensions of the net. + + Take + image_fnames: list + + Give + detections: list of {filename: image filename, window: crop coordinates, + predictions: prediction vector} dicts. + """ + import selective_search_ijcv_with_python as selective_search + # Make absolute paths so MATLAB can find the files. + image_fnames = [os.path.abspath(f) for f in image_fnames] + windows_list = selective_search.get_windows( + image_fnames, + cmd='selective_search_rcnn' + ) + # Run windowed detection on the selective search list. + return self.detect_windows(zip(image_fnames, windows_list)) + + + def crop(self, im, window): + """ + Crop a window from the image for detection. Include surrounding context + according to the `context_pad` configuration. + + Take + im: H x W x K image ndarray to crop. + window: bounding box coordinates as ymin, xmin, ymax, xmax. + + Give + crop: cropped window. + """ + # Crop window from the image. + crop = im[window[0]:window[2], window[1]:window[3]] + + if self.context_pad: + box = window.copy() + crop_size = self.blobs[self.inputs[0]].width # assumes square + scale = crop_size / (1. * crop_size - self.context_pad * 2) + # Crop a box + surrounding context. + half_h = (box[2] - box[0] + 1) / 2. + half_w = (box[3] - box[1] + 1) / 2. + center = (box[0] + half_h, box[1] + half_w) + scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w)) + box = np.round(np.tile(center, 2) + scaled_dims) + full_h = box[2] - box[0] + 1 + full_w = box[3] - box[1] + 1 + scale_h = crop_size / full_h + scale_w = crop_size / full_w + pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds + pad_x = round(max(0, -box[1]) * scale_w) + + # Clip box to image dimensions. + im_h, im_w = im.shape[:2] + box = np.clip(box, 0., [im_h, im_w, im_h, im_w]) + clip_h = box[2] - box[0] + 1 + clip_w = box[3] - box[1] + 1 + assert(clip_h > 0 and clip_w > 0) + crop_h = round(clip_h * scale_h) + crop_w = round(clip_w * scale_w) + if pad_y + crop_h > crop_size: + crop_h = crop_size - pad_y + if pad_x + crop_w > crop_size: + crop_w = crop_size - pad_x + + # collect with context padding and place in input + # with mean padding + context_crop = im[box[0]:box[2], box[1]:box[3]] + context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) + crop = self.crop_mean.copy() + crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop + + return crop + + + def configure_crop(self, context_pad): + """ + Configure amount of context for cropping. + If context is included, make the special input mean for context padding. + + Take + context_pad: amount of context for cropping. 
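+ If context_pad is None or 0, no context mean is prepared and windows
+ are cropped without a context border.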
+ """ + self.context_pad = context_pad + if self.context_pad: + input_scale = self.input_scale.get(self.inputs[0]) + channel_order = self.channel_swap.get(self.inputs[0]) + # Padding context crops needs the mean in unprocessed input space. + self.crop_mean = self.mean[self.inputs[0]].copy() + self.crop_mean = self.crop_mean.transpose((1,2,0)) + channel_order_inverse = [channel_order.index(i) + for i in range(self.crop_mean.shape[2])] + self.crop_mean = self.crop_mean[:,:, channel_order_inverse] + self.crop_mean /= input_scale diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/draw.py b/modules/dnns_easily_fooled/caffe/python/caffe/draw.py new file mode 100644 index 000000000..f8631cfa0 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/draw.py @@ -0,0 +1,76 @@ +""" +Caffe network visualization: draw the NetParameter protobuffer. + +NOTE: this requires pydot>=1.0.2, which is not included in requirements.txt +since it requires graphviz and other prerequisites outside the scope of the +Caffe. +""" + +from caffe.proto import caffe_pb2 +from google.protobuf import text_format +import pydot + +# Internal layer and blob styles. +LAYER_STYLE = {'shape': 'record', 'fillcolor': '#6495ED', + 'style': 'filled'} +NEURON_LAYER_STYLE = {'shape': 'record', 'fillcolor': '#90EE90', + 'style': 'filled'} +BLOB_STYLE = {'shape': 'octagon', 'fillcolor': '#F0E68C', + 'style': 'filled'} +def get_enum_name_by_value(): + desc = caffe_pb2.LayerParameter.LayerType.DESCRIPTOR + d = {} + for k,v in desc.values_by_name.items(): + d[v.number] = k + return d + +def get_pydot_graph(caffe_net): + pydot_graph = pydot.Dot(caffe_net.name, graph_type='digraph', rankdir="BT") + pydot_nodes = {} + pydot_edges = [] + d = get_enum_name_by_value() + for layer in caffe_net.layers: + name = layer.name + layertype = d[layer.type] + if (len(layer.bottom) == 1 and len(layer.top) == 1 and + layer.bottom[0] == layer.top[0]): + # We have an in-place neuron layer. + pydot_nodes[name + '_' + layertype] = pydot.Node( + '%s (%s)' % (name, layertype), **NEURON_LAYER_STYLE) + else: + pydot_nodes[name + '_' + layertype] = pydot.Node( + '%s (%s)' % (name, layertype), **LAYER_STYLE) + for bottom_blob in layer.bottom: + pydot_nodes[bottom_blob + '_blob'] = pydot.Node( + '%s' % (bottom_blob), **BLOB_STYLE) + pydot_edges.append((bottom_blob + '_blob', name + '_' + layertype)) + for top_blob in layer.top: + pydot_nodes[top_blob + '_blob'] = pydot.Node( + '%s' % (top_blob)) + pydot_edges.append((name + '_' + layertype, top_blob + '_blob')) + # Now, add the nodes and edges to the graph. + for node in pydot_nodes.values(): + pydot_graph.add_node(node) + for edge in pydot_edges: + pydot_graph.add_edge( + pydot.Edge(pydot_nodes[edge[0]], pydot_nodes[edge[1]])) + return pydot_graph + +def draw_net(caffe_net, ext='png'): + """Draws a caffe net and returns the image string encoded using the given + extension. + + Input: + caffe_net: a caffe.proto.caffe_pb2.NetParameter protocol buffer. + ext: the image extension. Default 'png'. + """ + return get_pydot_graph(caffe_net).create(format=ext) + +def draw_net_to_file(caffe_net, filename): + """Draws a caffe net, and saves it to file using the format given as the + file extension. Use '.raw' to output raw text that you can manually feed + to graphviz to draw graphs. 
+ """ + ext = filename[filename.rfind('.')+1:] + with open(filename, 'wb') as fid: + fid.write(draw_net(caffe_net, ext)) diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy b/modules/dnns_easily_fooled/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy new file mode 100644 index 000000000..666082c6a Binary files /dev/null and b/modules/dnns_easily_fooled/caffe/python/caffe/imagenet/ilsvrc_2012_mean.npy differ diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/io.py b/modules/dnns_easily_fooled/caffe/python/caffe/io.py new file mode 100644 index 000000000..1fc97231c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/io.py @@ -0,0 +1,159 @@ +import numpy as np +import skimage.io +import skimage.transform + +from caffe.proto import caffe_pb2 + + +def load_image(filename, color=True): + """ + Load an image converting from grayscale or alpha as needed. + + Take + filename: string + color: flag for color format. True (default) loads as RGB while False + loads as intensity (if image is already grayscale). + + Give + image: an image with type np.float32 of size (H x W x 3) in RGB or + of size (H x W x 1) in grayscale. + """ + img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32) + if img.ndim == 2: + img = img[:, :, np.newaxis] + if color: + img = np.tile(img, (1, 1, 3)) + elif img.shape[2] == 4: + img = img[:, :, :3] + return img + + +def resize_image(im, new_dims, interp_order=1): + """ + Resize an image array with interpolation. + + Take + im: (H x W x K) ndarray + new_dims: (height, width) tuple of new dimensions. + interp_order: interpolation order, default is linear. + + Give + im: resized ndarray with shape (new_dims[0], new_dims[1], K) + """ + return skimage.transform.resize(im, new_dims, order=interp_order) + + +def oversample(images, crop_dims): + """ + Crop images into the four corners, center, and their mirrored versions. + + Take + image: iterable of (H x W x K) ndarrays + crop_dims: (height, width) tuple for the crops. + + Give + crops: (10*N x H x W x K) ndarray of crops for number of inputs N. + """ + # Dimensions and center. + im_shape = np.array(images[0].shape) + crop_dims = np.array(crop_dims) + im_center = im_shape[:2] / 2.0 + + # Make crop coordinates + h_indices = (0, im_shape[0] - crop_dims[0]) + w_indices = (0, im_shape[1] - crop_dims[1]) + crops_ix = np.empty((5, 4), dtype=int) + curr = 0 + for i in h_indices: + for j in w_indices: + crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) + curr += 1 + crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ + -crop_dims / 2.0, + crop_dims / 2.0 + ]) + crops_ix = np.tile(crops_ix, (2, 1)) + + # Extract crops + crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1], + im_shape[-1]), dtype=np.float32) + ix = 0 + for im in images: + for crop in crops_ix: + crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] + ix += 1 + crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors + return crops + + +def blobproto_to_array(blob, return_diff=False): + """Convert a blob proto to an array. In default, we will just return the data, + unless return_diff is True, in which case we will return the diff. + """ + if return_diff: + return np.array(blob.diff).reshape( + blob.num, blob.channels, blob.height, blob.width) + else: + return np.array(blob.data).reshape( + blob.num, blob.channels, blob.height, blob.width) + + +def array_to_blobproto(arr, diff=None): + """Converts a 4-dimensional array to blob proto. 
If diff is given, also + convert the diff. You need to make sure that arr and diff have the same + shape, and this function does not do sanity check. + """ + if arr.ndim != 4: + raise ValueError('Incorrect array shape.') + blob = caffe_pb2.BlobProto() + blob.num, blob.channels, blob.height, blob.width = arr.shape; + blob.data.extend(arr.astype(float).flat) + if diff is not None: + blob.diff.extend(diff.astype(float).flat) + return blob + + +def arraylist_to_blobprotovecor_str(arraylist): + """Converts a list of arrays to a serialized blobprotovec, which could be + then passed to a network for processing. + """ + vec = caffe_pb2.BlobProtoVector() + vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist]) + return vec.SerializeToString() + + +def blobprotovector_str_to_arraylist(str): + """Converts a serialized blobprotovec to a list of arrays. + """ + vec = caffe_pb2.BlobProtoVector() + vec.ParseFromString(str) + return [blobproto_to_array(blob) for blob in vec.blobs] + + +def array_to_datum(arr, label=0): + """Converts a 3-dimensional array to datum. If the array has dtype uint8, + the output data will be encoded as a string. Otherwise, the output data + will be stored in float format. + """ + if arr.ndim != 3: + raise ValueError('Incorrect array shape.') + datum = caffe_pb2.Datum() + datum.channels, datum.height, datum.width = arr.shape + if arr.dtype == np.uint8: + datum.data = arr.tostring() + else: + datum.float_data.extend(arr.flat) + datum.label = label + return datum + + +def datum_to_array(datum): + """Converts a datum to an array. Note that the label is not returned, + as one can easily get it by calling datum.label. + """ + if len(datum.data): + return np.fromstring(datum.data, dtype = np.uint8).reshape( + datum.channels, datum.height, datum.width) + else: + return np.array(datum.float_data).astype(float).reshape( + datum.channels, datum.height, datum.width) diff --git a/modules/dnns_easily_fooled/caffe/python/caffe/pycaffe.py b/modules/dnns_easily_fooled/caffe/python/caffe/pycaffe.py new file mode 100644 index 000000000..5c1512cd8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/caffe/pycaffe.py @@ -0,0 +1,352 @@ +""" +Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic +interface. +""" + +from collections import OrderedDict +from itertools import izip_longest +import numpy as np + +from ._caffe import Net, SGDSolver +import caffe.io + +# We directly update methods from Net here (rather than using composition or +# inheritance) so that nets created by caffe (e.g., by SGDSolver) will +# automatically have the improved interface. + + +@property +def _Net_blobs(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + blobs indexed by name + """ + return OrderedDict([(bl.name, bl) for bl in self._blobs]) + + +@property +def _Net_params(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + parameters indexed by name; each is a list of multiple blobs (e.g., + weights and biases) + """ + return OrderedDict([(lr.name, lr.blobs) for lr in self.layers + if len(lr.blobs) > 0]) + + +def _Net_forward(self, blobs=None, **kwargs): + """ + Forward pass: prepare inputs and run the net forward. + + Take + blobs: list of blobs to return in addition to output blobs. + kwargs: Keys are input blob names and values are blob ndarrays. + For formatting inputs for Caffe, see Net.preprocess(). + If None, input is taken from data layers. + + Give + outs: {blob name: blob ndarray} dict. 
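+
+ Example (a sketch; assumes an input blob named 'data', an output blob
+ named 'prob', and caffe_in already preprocessed to the input blob shape):
+ out = net.forward(**{'data': caffe_in})
+ probs = out['prob']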
+ """ + if blobs is None: + blobs = [] + + if kwargs: + if set(kwargs.keys()) != set(self.inputs): + raise Exception('Input blob arguments do not match net inputs.') + # Set input according to defined shapes and make arrays single and + # C-contiguous as Caffe expects. + for in_, blob in kwargs.iteritems(): + if blob.shape[0] != self.blobs[in_].num: + raise Exception('Input is not batch sized') + if blob.ndim != 4: + raise Exception('{} blob is not 4-d'.format(in_)) + self.blobs[in_].data[...] = blob + + self._forward() + + # Unpack blobs to extract + outs = {out: self.blobs[out].data for out in set(self.outputs + blobs)} + return outs + + +def _Net_backward(self, diffs=None, **kwargs): + """ + Backward pass: prepare diffs and run the net backward. + + Take + diffs: list of diffs to return in addition to bottom diffs. + kwargs: Keys are output blob names and values are diff ndarrays. + If None, top diffs are taken from forward loss. + + Give + outs: {blob name: diff ndarray} dict. + """ + if diffs is None: + diffs = [] + + if kwargs: + if set(kwargs.keys()) != set(self.outputs): + raise Exception('Top diff arguments do not match net outputs.') + # Set top diffs according to defined shapes and make arrays single and + # C-contiguous as Caffe expects. + for top, diff in kwargs.iteritems(): + if diff.shape[0] != self.blobs[top].num: + raise Exception('Diff is not batch sized') + if diff.ndim != 4: + raise Exception('{} diff is not 4-d'.format(top)) + self.blobs[top].diff[...] = diff + + self._backward() + + # Unpack diffs to extract + outs = {out: self.blobs[out].diff for out in set(self.inputs + diffs)} + return outs + + +def _Net_forward_all(self, blobs=None, **kwargs): + """ + Run net forward in batches. + + Take + blobs: list of blobs to extract as in forward() + kwargs: Keys are input blob names and values are blob ndarrays. + Refer to forward(). + + Give + all_outs: {blob name: list of blobs} dict. + """ + # Collect outputs from batches + all_outs = {out: [] for out in set(self.outputs + (blobs or []))} + for batch in self._batch(kwargs): + outs = self.forward(blobs=blobs, **batch) + for out, out_blob in outs.iteritems(): + all_outs[out].extend(out_blob.copy()) + # Package in ndarray. + for out in all_outs: + all_outs[out] = np.asarray(all_outs[out]) + # Discard padding. + pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next()) + if pad: + for out in all_outs: + all_outs[out] = all_outs[out][:-pad] + return all_outs + + +def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): + """ + Run net forward + backward in batches. + + Take + blobs: list of blobs to extract as in forward() + diffs: list of diffs to extract as in backward() + kwargs: Keys are input (for forward) and output (for backward) blob names + and values are ndarrays. Refer to forward() and backward(). + Prefilled variants are called for lack of input or output blobs. + + Give + all_blobs: {blob name: blob ndarray} dict. + all_diffs: {blob name: diff ndarray} dict. + """ + # Batch blobs and diffs. + all_outs = {out: [] for out in set(self.outputs + (blobs or []))} + all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))} + forward_batches = self._batch({in_: kwargs[in_] + for in_ in self.inputs if in_ in kwargs}) + backward_batches = self._batch({out: kwargs[out] + for out in self.outputs if out in kwargs}) + # Collect outputs from batches (and heed lack of forward/backward batches). 
+ for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}): + batch_blobs = self.forward(blobs=blobs, **fb) + batch_diffs = self.backward(diffs=diffs, **bb) + for out, out_blobs in batch_blobs.iteritems(): + all_outs[out].extend(out_blobs) + for diff, out_diffs in batch_diffs.iteritems(): + all_diffs[diff].extend(out_diffs) + # Package in ndarray. + for out, diff in zip(all_outs, all_diffs): + all_outs[out] = np.asarray(all_outs[out]) + all_diffs[diff] = np.asarray(all_diffs[diff]) + # Discard padding at the end and package in ndarray. + pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next()) + if pad: + for out, diff in zip(all_outs, all_diffs): + all_outs[out] = all_outs[out][:-pad] + all_diffs[diff] = all_diffs[diff][:-pad] + return all_outs, all_diffs + + +def _Net_set_mean(self, input_, mean_f, mode='elementwise'): + """ + Set the mean to subtract for data centering. + + Take + input_: which input to assign this mean. + mean_f: path to mean .npy with ndarray (input dimensional or broadcastable) + mode: elementwise = use the whole mean (and check dimensions) + channel = channel constant (e.g. mean pixel instead of mean image) + """ + if not hasattr(self, 'mean'): + self.mean = {} + if input_ not in self.inputs: + raise Exception('Input not in {}'.format(self.inputs)) + in_shape = self.blobs[input_].data.shape + mean = np.load(mean_f) + if mode == 'elementwise': + if mean.shape != in_shape[1:]: + # Resize mean (which requires H x W x K input in range [0,1]). + m_min, m_max = mean.min(), mean.max() + normal_mean = (mean - m_min) / (m_max - m_min) + mean = caffe.io.resize_image(normal_mean.transpose((1,2,0)), + in_shape[2:]).transpose((2,0,1)) * (m_max - m_min) + m_min + self.mean[input_] = mean + elif mode == 'channel': + self.mean[input_] = mean.mean(1).mean(1).reshape((in_shape[1], 1, 1)) + else: + raise Exception('Mode not in {}'.format(['elementwise', 'channel'])) + + + +def _Net_set_input_scale(self, input_, scale): + """ + Set the input feature scaling factor s.t. input blob = input * scale. + + Take + input_: which input to assign this scale factor + scale: scale coefficient + """ + if not hasattr(self, 'input_scale'): + self.input_scale = {} + if input_ not in self.inputs: + raise Exception('Input not in {}'.format(self.inputs)) + self.input_scale[input_] = scale + + +def _Net_set_channel_swap(self, input_, order): + """ + Set the input channel order for e.g. RGB to BGR conversion + as needed for the reference ImageNet model. + + Take + input_: which input to assign this channel order + order: the order to take the channels. + (2,1,0) maps RGB to BGR for example. 
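+
+ Example (a sketch, matching how Classifier and Detector configure the
+ reference ImageNet model's BGR input):
+ net.set_channel_swap(net.inputs[0], (2, 1, 0))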
+ """ + if not hasattr(self, 'channel_swap'): + self.channel_swap = {} + if input_ not in self.inputs: + raise Exception('Input not in {}'.format(self.inputs)) + self.channel_swap[input_] = order + + +def _Net_preprocess(self, input_name, input_): + """ + Format input for Caffe: + - convert to single + - resize to input dimensions (preserving number of channels) + - scale feature + - reorder channels (for instance color to BGR) + - subtract mean + - transpose dimensions to K x H x W + + Take + input_name: name of input blob to preprocess for + input_: (H' x W' x K) ndarray + + Give + caffe_inputs: (K x H x W) ndarray + """ + caffe_in = input_.astype(np.float32) + input_scale = self.input_scale.get(input_name) + channel_order = self.channel_swap.get(input_name) + mean = self.mean.get(input_name) + in_size = self.blobs[input_name].data.shape[2:] + if caffe_in.shape[:2] != in_size: + caffe_in = caffe.io.resize_image(caffe_in, in_size) + if input_scale: + caffe_in *= input_scale + if channel_order: + caffe_in = caffe_in[:, :, channel_order] + caffe_in = caffe_in.transpose((2, 0, 1)) + if mean is not None: + caffe_in -= mean + return caffe_in + + +def _Net_deprocess(self, input_name, input_): + """ + Invert Caffe formatting; see Net.preprocess(). + """ + decaf_in = input_.copy().squeeze() + input_scale = self.input_scale.get(input_name) + channel_order = self.channel_swap.get(input_name) + mean = self.mean.get(input_name) + if mean is not None: + decaf_in += mean + decaf_in = decaf_in.transpose((1,2,0)) + if channel_order: + channel_order_inverse = [channel_order.index(i) + for i in range(decaf_in.shape[2])] + decaf_in = decaf_in[:, :, channel_order_inverse] + if input_scale: + decaf_in /= input_scale + return decaf_in + + +def _Net_set_input_arrays(self, data, labels): + """ + Set input arrays of the in-memory MemoryDataLayer. + (Note: this is only for networks declared with the memory data layer.) + """ + if labels.ndim == 1: + labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis, + np.newaxis]) + return self._set_input_arrays(data, labels) + + +def _Net_batch(self, blobs): + """ + Batch blob lists according to net's batch size. + + Take + blobs: Keys blob names and values are lists of blobs (of any length). + Naturally, all the lists should have the same length. + + Give (yield) + batch: {blob name: list of blobs} dict for a single batch. + """ + num = len(blobs.itervalues().next()) + batch_size = self.blobs.itervalues().next().num + remainder = num % batch_size + num_batches = num / batch_size + + # Yield full batches. + for b in range(num_batches): + i = b * batch_size + yield {name: blobs[name][i:i + batch_size] for name in blobs} + + # Yield last padded batch, if any. + if remainder > 0: + padded_batch = {} + for name in blobs: + padding = np.zeros((batch_size - remainder,) + + blobs[name].shape[1:]) + padded_batch[name] = np.concatenate([blobs[name][-remainder:], + padding]) + yield padded_batch + + +# Attach methods to Net. 
+Net.blobs = _Net_blobs +Net.params = _Net_params +Net.forward = _Net_forward +Net.backward = _Net_backward +Net.forward_all = _Net_forward_all +Net.forward_backward_all = _Net_forward_backward_all +Net.set_mean = _Net_set_mean +Net.set_input_scale = _Net_set_input_scale +Net.set_channel_swap = _Net_set_channel_swap +Net.preprocess = _Net_preprocess +Net.deprocess = _Net_deprocess +Net.set_input_arrays = _Net_set_input_arrays +Net._batch = _Net_batch diff --git a/modules/dnns_easily_fooled/caffe/python/classify.py b/modules/dnns_easily_fooled/caffe/python/classify.py new file mode 100755 index 000000000..fdaeeb01b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/classify.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +""" +classify.py is an out-of-the-box image classifer callable from the command line. + +By default it configures and runs the Caffe reference ImageNet model. +""" +import numpy as np +import os +import sys +import argparse +import glob +import time + +import caffe + + +def main(argv): + pycaffe_dir = os.path.dirname(__file__) + + parser = argparse.ArgumentParser() + # Required arguments: input and output files. + parser.add_argument( + "input_file", + help="Input image, directory, or npy." + ) + parser.add_argument( + "output_file", + help="Output npy filename." + ) + # Optional arguments. + parser.add_argument( + "--model_def", + default=os.path.join(pycaffe_dir, + "../examples/imagenet/imagenet_deploy.prototxt"), + help="Model definition file." + ) + parser.add_argument( + "--pretrained_model", + default=os.path.join(pycaffe_dir, + "../examples/imagenet/caffe_reference_imagenet_model"), + help="Trained model weights file." + ) + parser.add_argument( + "--gpu", + action='store_true', + help="Switch for gpu computation." + ) + parser.add_argument( + "--center_only", + action='store_true', + help="Switch for prediction from center crop alone instead of " + + "averaging predictions across crops (default)." + ) + parser.add_argument( + "--images_dim", + default='256,256', + help="Canonical 'height,width' dimensions of input images." + ) + parser.add_argument( + "--mean_file", + default=os.path.join(pycaffe_dir, + 'caffe/imagenet/ilsvrc_2012_mean.npy'), + help="Data set image mean of H x W x K dimensions (numpy array). " + + "Set to '' for no mean subtraction." + ) + parser.add_argument( + "--input_scale", + type=float, + default=255, + help="Multiply input features by this scale before input to net" + ) + parser.add_argument( + "--channel_swap", + default='2,1,0', + help="Order to permute input channels. The default converts " + + "RGB -> BGR since BGR is the Caffe default by way of OpenCV." + + ) + parser.add_argument( + "--ext", + default='jpg', + help="Image file extension to take as input when a directory " + + "is given as the input file." + ) + args = parser.parse_args() + + image_dims = [int(s) for s in args.images_dim.split(',')] + channel_swap = [int(s) for s in args.channel_swap.split(',')] + + # Make classifier. + classifier = caffe.Classifier(args.model_def, args.pretrained_model, + image_dims=image_dims, gpu=args.gpu, mean_file=args.mean_file, + input_scale=args.input_scale, channel_swap=channel_swap) + + if args.gpu: + print 'GPU mode' + + # Load numpy array (.npy), directory glob (*.jpg), or image file. + args.input_file = os.path.expanduser(args.input_file) + if args.input_file.endswith('npy'): + inputs = np.load(args.input_file) + elif os.path.isdir(args.input_file): + inputs =[caffe.io.load_image(im_f) + for im_f in glob.glob(args.input_file + '/*.' 
+ args.ext)] + else: + inputs = [caffe.io.load_image(args.input_file)] + + print "Classifying %d inputs." % len(inputs) + + # Classify. + start = time.time() + predictions = classifier.predict(inputs, not args.center_only) + print "Done in %.2f s." % (time.time() - start) + + # Save + np.save(args.output_file, predictions) + + +if __name__ == '__main__': + main(sys.argv) diff --git a/modules/dnns_easily_fooled/caffe/python/detect.py b/modules/dnns_easily_fooled/caffe/python/detect.py new file mode 100755 index 000000000..a3bee5c5c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/detect.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +""" +detector.py is an out-of-the-box windowed detector +callable from the command line. + +By default it configures and runs the Caffe reference ImageNet model. +Note that this model was trained for image classification and not detection, +and finetuning for detection can be expected to improve results. + +The selective_search_ijcv_with_python code required for the selective search +proposal mode is available at + https://github.com/sergeyk/selective_search_ijcv_with_python + +TODO: +- batch up image filenames as well: don't want to load all of them into memory +- come up with a batching scheme that preserved order / keeps a unique ID +""" +import numpy as np +import pandas as pd +import os +import argparse +import time + +import caffe + +CROP_MODES = ['list', 'selective_search'] +COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax'] + + +def main(argv): + pycaffe_dir = os.path.dirname(__file__) + + parser = argparse.ArgumentParser() + # Required arguments: input and output. + parser.add_argument( + "input_file", + help="Input txt/csv filename. If .txt, must be list of filenames.\ + If .csv, must be comma-separated file with header\ + 'filename, xmin, ymin, xmax, ymax'" + ) + parser.add_argument( + "output_file", + help="Output h5/csv filename. Format depends on extension." + ) + # Optional arguments. + parser.add_argument( + "--model_def", + default=os.path.join(pycaffe_dir, + "../examples/imagenet/imagenet_deploy.prototxt"), + help="Model definition file." + ) + parser.add_argument( + "--pretrained_model", + default=os.path.join(pycaffe_dir, + "../examples/imagenet/caffe_reference_imagenet_model"), + help="Trained model weights file." + ) + parser.add_argument( + "--crop_mode", + default="selective_search", + choices=CROP_MODES, + help="How to generate windows for detection." + ) + parser.add_argument( + "--gpu", + action='store_true', + help="Switch for gpu computation." + ) + parser.add_argument( + "--mean_file", + default=os.path.join(pycaffe_dir, + 'caffe/imagenet/ilsvrc_2012_mean.npy'), + help="Data set image mean of H x W x K dimensions (numpy array). " + + "Set to '' for no mean subtraction." + ) + parser.add_argument( + "--input_scale", + type=float, + default=255, + help="Multiply input features by this scale before input to net" + ) + parser.add_argument( + "--channel_swap", + default='2,1,0', + help="Order to permute input channels. The default converts " + + "RGB -> BGR since BGR is the Caffe default by way of OpenCV." + + ) + parser.add_argument( + "--context_pad", + type=int, + default='16', + help="Amount of surrounding context to collect in input window." + ) + args = parser.parse_args() + + channel_swap = [int(s) for s in args.channel_swap.split(',')] + + # Make detector. 
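+ # Preprocessing options (mean, input scale, channel swap) and the context_pad
+ # border are handed to the Detector wrapper, which applies them to every
+ # window crop before the forward pass.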
+ detector = caffe.Detector(args.model_def, args.pretrained_model, + gpu=args.gpu, mean_file=args.mean_file, + input_scale=args.input_scale, channel_swap=channel_swap, + context_pad=args.context_pad) + + if args.gpu: + print 'GPU mode' + + # Load input. + t = time.time() + print('Loading input...') + if args.input_file.lower().endswith('txt'): + with open(args.input_file) as f: + inputs = [_.strip() for _ in f.readlines()] + elif args.input_file.lower().endswith('csv'): + inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str}) + inputs.set_index('filename', inplace=True) + else: + raise Exception("Unknown input file type: not in txt or csv.") + + # Detect. + if args.crop_mode == 'list': + # Unpack sequence of (image filename, windows). + images_windows = ( + (ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values) + for ix in inputs.index.unique() + ) + detections = detector.detect_windows(images_windows) + else: + detections = detector.detect_selective_search(inputs) + print("Processed {} windows in {:.3f} s.".format(len(detections), + time.time() - t)) + + # Collect into dataframe with labeled fields. + df = pd.DataFrame(detections) + df.set_index('filename', inplace=True) + df[COORD_COLS] = pd.DataFrame( + data=np.vstack(df['window']), index=df.index, columns=COORD_COLS) + del(df['window']) + + # Save results. + t = time.time() + if args.output_file.lower().endswith('csv'): + # csv + # Enumerate the class probabilities. + class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)] + df[class_cols] = pd.DataFrame( + data=np.vstack(df['feat']), index=df.index, columns=class_cols) + df.to_csv(args.output_file, cols=COORD_COLS + class_cols) + else: + # h5 + df.to_hdf(args.output_file, 'df', mode='w') + print("Saved to {} in {:.3f} s.".format(args.output_file, + time.time() - t)) + + +if __name__ == "__main__": + import sys + main(sys.argv) diff --git a/modules/dnns_easily_fooled/caffe/python/draw_net.py b/modules/dnns_easily_fooled/caffe/python/draw_net.py new file mode 100755 index 000000000..ba4882942 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/draw_net.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +""" +Draw a graph of the net architecture. 
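+
+Usage (as printed by main() below):
+  python draw_net.py input_net_proto_file output_image_file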
+""" +import os +from google.protobuf import text_format + +import caffe, caffe.draw +from caffe.proto import caffe_pb2 + + +def main(argv): + if len(argv) != 3: + print 'Usage: %s input_net_proto_file output_image_file' % \ + os.path.basename(sys.argv[0]) + else: + net = caffe_pb2.NetParameter() + text_format.Merge(open(sys.argv[1]).read(), net) + print 'Drawing net to %s' % sys.argv[2] + caffe.draw.draw_net_to_file(net, sys.argv[2]) + + +if __name__ == '__main__': + import sys + main(sys.argv) diff --git a/modules/dnns_easily_fooled/caffe/python/requirements.txt b/modules/dnns_easily_fooled/caffe/python/requirements.txt new file mode 100644 index 000000000..5c076b668 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/requirements.txt @@ -0,0 +1,14 @@ +Cython>=0.19.2 +h5py>=2.2.0 +ipython>=1.1.0 +leveldb>=0.191 +matplotlib>=1.3.1 +networkx>=1.8.1 +nose>=1.3.0 +numpy>=1.7.1 +pandas>=0.12.0 +protobuf>=2.5.0 +python-gflags>=2.0 +scikit-image>=0.9.3 +scikit-learn>=0.14.1 +scipy>=0.13.2 diff --git a/modules/dnns_easily_fooled/caffe/python/test.py b/modules/dnns_easily_fooled/caffe/python/test.py new file mode 100644 index 000000000..aabcfddbb --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/python/test.py @@ -0,0 +1,171 @@ +import numpy as np +import skimage.io +from scipy.ndimage import zoom +from skimage.transform import resize + +from caffe.proto import caffe_pb2 + + +def load_image(filename, color=True): + """ + Load an image converting from grayscale or alpha as needed. + + Take + filename: string + color: flag for color format. True (default) loads as RGB while False + loads as intensity (if image is already grayscale). + + Give + image: an image with type np.float32 in range [0, 1] + of size (H x W x 3) in RGB or + of size (H x W x 1) in grayscale. + """ + img = skimage.img_as_float(skimage.io.imread(filename)).astype(np.float32) + if img.ndim == 2: + img = img[:, :, np.newaxis] + if color: + img = np.tile(img, (1, 1, 3)) + elif img.shape[2] == 4: + img = img[:, :, :3] + return img + + +def resize_image(im, new_dims, interp_order=1): + """ + Resize an image array with interpolation. + + Take + im: (H x W x K) ndarray + new_dims: (height, width) tuple of new dimensions. + interp_order: interpolation order, default is linear. + + Give + im: resized ndarray with shape (new_dims[0], new_dims[1], K) + """ + if im.shape[-1] == 1 or im.shape[-1] == 3: + # skimage is fast but only understands {1,3} channel images in [0, 1]. + im_min, im_max = im.min(), im.max() + im_std = (im - im_min) / (im_max - im_min) + resized_std = resize(im_std, new_dims, order=interp_order) + resized_im = resized_std * (im_max - im_min) + im_min + else: + # ndimage interpolates anything but more slowly. + scale = tuple(np.array(new_dims) / np.array(im.shape[:2])) + resized_im = zoom(im, scale + (1,), order=interp_order) + return resized_im.astype(np.float32) + + +def oversample(images, crop_dims): + """ + Crop images into the four corners, center, and their mirrored versions. + + Take + image: iterable of (H x W x K) ndarrays + crop_dims: (height, width) tuple for the crops. + + Give + crops: (10*N x H x W x K) ndarray of crops for number of inputs N. + """ + # Dimensions and center. 
+ im_shape = np.array(images[0].shape) + crop_dims = np.array(crop_dims) + im_center = im_shape[:2] / 2.0 + + # Make crop coordinates + h_indices = (0, im_shape[0] - crop_dims[0]) + w_indices = (0, im_shape[1] - crop_dims[1]) + crops_ix = np.empty((5, 4), dtype=int) + curr = 0 + for i in h_indices: + for j in w_indices: + crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) + curr += 1 + crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ + -crop_dims / 2.0, + crop_dims / 2.0 + ]) + crops_ix = np.tile(crops_ix, (2, 1)) + + # Extract crops + crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1], + im_shape[-1]), dtype=np.float32) + ix = 0 + for im in images: + for crop in crops_ix: + crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] + ix += 1 + crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors + return crops + + +def blobproto_to_array(blob, return_diff=False): + """Convert a blob proto to an array. In default, we will just return the data, + unless return_diff is True, in which case we will return the diff. + """ + if return_diff: + return np.array(blob.diff).reshape( + blob.num, blob.channels, blob.height, blob.width) + else: + return np.array(blob.data).reshape( + blob.num, blob.channels, blob.height, blob.width) + + +def array_to_blobproto(arr, diff=None): + """Converts a 4-dimensional array to blob proto. If diff is given, also + convert the diff. You need to make sure that arr and diff have the same + shape, and this function does not do sanity check. + """ + if arr.ndim != 4: + raise ValueError('Incorrect array shape.') + blob = caffe_pb2.BlobProto() + blob.num, blob.channels, blob.height, blob.width = arr.shape; + blob.data.extend(arr.astype(float).flat) + if diff is not None: + blob.diff.extend(diff.astype(float).flat) + return blob + + +def arraylist_to_blobprotovecor_str(arraylist): + """Converts a list of arrays to a serialized blobprotovec, which could be + then passed to a network for processing. + """ + vec = caffe_pb2.BlobProtoVector() + vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist]) + return vec.SerializeToString() + + +def blobprotovector_str_to_arraylist(str): + """Converts a serialized blobprotovec to a list of arrays. + """ + vec = caffe_pb2.BlobProtoVector() + vec.ParseFromString(str) + return [blobproto_to_array(blob) for blob in vec.blobs] + + +def array_to_datum(arr, label=0): + """Converts a 3-dimensional array to datum. If the array has dtype uint8, + the output data will be encoded as a string. Otherwise, the output data + will be stored in float format. + """ + if arr.ndim != 3: + raise ValueError('Incorrect array shape.') + datum = caffe_pb2.Datum() + datum.channels, datum.height, datum.width = arr.shape + if arr.dtype == np.uint8: + datum.data = arr.tostring() + else: + datum.float_data.extend(arr.flat) + datum.label = label + return datum + + +def datum_to_array(datum): + """Converts a datum to an array. Note that the label is not returned, + as one can easily get it by calling datum.label. 
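+
+ Illustrative round trip for a 3-D uint8 array `arr` (names are examples
+ only): d = array_to_datum(arr, label=1); arr2 = datum_to_array(d).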
+ """ + if len(datum.data): + return np.fromstring(datum.data, dtype = np.uint8).reshape( + datum.channels, datum.height, datum.width) + else: + return np.array(datum.float_data).astype(float).reshape( + datum.channels, datum.height, datum.width) diff --git a/modules/dnns_easily_fooled/caffe/scripts/build_docs.sh b/modules/dnns_easily_fooled/caffe/scripts/build_docs.sh new file mode 100755 index 000000000..1faf8bd57 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/scripts/build_docs.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +PORT=${1:-4000} + +echo "usage: build_docs.sh [port]" + +# Find the docs dir, no matter where the script is called +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +cd $DIR/../docs + +jekyll serve -w -s . -d _site --port=$PORT diff --git a/modules/dnns_easily_fooled/caffe/scripts/cpp_lint.py b/modules/dnns_easily_fooled/caffe/scripts/cpp_lint.py new file mode 100755 index 000000000..76eee4b2d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/scripts/cpp_lint.py @@ -0,0 +1,4796 @@ +#!/usr/bin/python +# +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Does google-lint on c++ files. + +The goal of this script is to identify places in the code that *may* +be in non-compliance with google style. It does not attempt to fix +up these problems -- the point is to educate. It does also not +attempt to find all problems, or to ensure that everything it does +find is legitimately a problem. + +In particular, we can get very confused by /* and // inside strings! +We do a small hack, which is to ignore //'s with "'s after them on the +same line, but it is far from perfect (in either direction). +""" + +import codecs +import copy +import getopt +import math # for log +import os +import re +import sre_compile +import string +import sys +import unicodedata + + +_USAGE = """ +Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] + [--counting=total|toplevel|detailed] [--root=subdir] + [--linelength=digits] + [file] ... 
+ + The style guidelines this tries to follow are those in + http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml + + Every problem is given a confidence score from 1-5, with 5 meaning we are + certain of the problem, and 1 meaning it could be a legitimate construct. + This will miss some errors, and is not a substitute for a code review. + + To suppress false-positive errors of a certain category, add a + 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) + suppresses errors of all categories on that line. + + The files passed in will be linted; at least one file must be provided. + Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the + extensions with the --extensions flag. + + Flags: + + output=vs7 + By default, the output is formatted to ease emacs parsing. Visual Studio + compatible output (vs7) may also be used. Other formats are unsupported. + + verbose=# + Specify a number 0-5 to restrict errors to certain verbosity levels. + + filter=-x,+y,... + Specify a comma-separated list of category-filters to apply: only + error messages whose category names pass the filters will be printed. + (Category names are printed with the message and look like + "[whitespace/indent]".) Filters are evaluated left to right. + "-FOO" and "FOO" means "do not print categories that start with FOO". + "+FOO" means "do print categories that start with FOO". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=whitespace,runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + To see a list of all the categories used in cpplint, pass no arg: + --filter= + + counting=total|toplevel|detailed + The total number of errors found is always printed. If + 'toplevel' is provided, then the count of errors in each of + the top-level categories like 'build' and 'whitespace' will + also be printed. If 'detailed' is provided, then a count + is provided for each category like 'build/class'. + + root=subdir + The root directory used for deriving header guard CPP variable. + By default, the header guard CPP variable is calculated as the relative + path to the directory that contains .git, .hg, or .svn. When this flag + is specified, the relative path is calculated from the specified + directory. If the specified directory does not exist, this flag is + ignored. + + Examples: + Assuing that src/.git exists, the header guard CPP variables for + src/chrome/browser/ui/browser.h are: + + No flag => CHROME_BROWSER_UI_BROWSER_H_ + --root=chrome => BROWSER_UI_BROWSER_H_ + --root=chrome/browser => UI_BROWSER_H_ + + linelength=digits + This is the allowed line length for the project. The default value is + 80 characters. + + Examples: + --linelength=120 + + extensions=extension,extension,... + The allowed file extensions that cpplint will check + + Examples: + --extensions=hpp,cpp +""" + +# We categorize each error message we print. Here are the categories. +# We want an explicit list so we can list them all in cpplint --filter=. +# If you add a new error message with a new category, add it to the list +# here! cpplint_unittest.py should tell you if you forget to do this. 
+_ERROR_CATEGORIES = [ + 'build/class', + 'build/deprecated', + 'build/endif_comment', + 'build/explicit_make_pair', + 'build/forward_decl', + 'build/header_guard', + 'build/include', + 'build/include_alpha', + 'build/include_dir', + 'build/include_order', + 'build/include_what_you_use', + 'build/namespaces', + 'build/printf_format', + 'build/storage_class', + 'caffe/random_fn', + 'legal/copyright', + 'readability/alt_tokens', + 'readability/braces', + 'readability/casting', + 'readability/check', + 'readability/constructors', + 'readability/fn_size', + 'readability/function', + 'readability/multiline_comment', + 'readability/multiline_string', + 'readability/namespace', + 'readability/nolint', + 'readability/nul', + 'readability/streams', + 'readability/todo', + 'readability/utf8', + 'runtime/arrays', + 'runtime/casting', + 'runtime/explicit', + 'runtime/int', + 'runtime/init', + 'runtime/invalid_increment', + 'runtime/member_string_references', + 'runtime/memset', + 'runtime/operator', + 'runtime/printf', + 'runtime/printf_format', + 'runtime/references', + 'runtime/string', + 'runtime/threadsafe_fn', + 'runtime/vlog', + 'whitespace/blank_line', + 'whitespace/braces', + 'whitespace/comma', + 'whitespace/comments', + 'whitespace/empty_conditional_body', + 'whitespace/empty_loop_body', + 'whitespace/end_of_line', + 'whitespace/ending_newline', + 'whitespace/forcolon', + 'whitespace/indent', + 'whitespace/line_length', + 'whitespace/newline', + 'whitespace/operators', + 'whitespace/parens', + 'whitespace/semicolon', + 'whitespace/tab', + 'whitespace/todo' + ] + +# The default state of the category filter. This is overrided by the --filter= +# flag. By default all errors are on, so only add here categories that should be +# off by default (i.e., categories that must be enabled by the --filter= flags). +# All entries here should start with a '-' or '+', as in the --filter= flag. +_DEFAULT_FILTERS = [ + '-build/include_alpha', + '-build/include_dir', + '-readability/todo', + ] + +# We used to check for high-bit characters, but after much discussion we +# decided those were OK, as long as they were in UTF-8 and didn't represent +# hard-coded international strings, which belong in a separate i18n file. 
+ + +# C++ headers +_CPP_HEADERS = frozenset([ + # Legacy + 'algobase.h', + 'algo.h', + 'alloc.h', + 'builtinbuf.h', + 'bvector.h', + 'complex.h', + 'defalloc.h', + 'deque.h', + 'editbuf.h', + 'fstream.h', + 'function.h', + 'hash_map', + 'hash_map.h', + 'hash_set', + 'hash_set.h', + 'hashtable.h', + 'heap.h', + 'indstream.h', + 'iomanip.h', + 'iostream.h', + 'istream.h', + 'iterator.h', + 'list.h', + 'map.h', + 'multimap.h', + 'multiset.h', + 'ostream.h', + 'pair.h', + 'parsestream.h', + 'pfstream.h', + 'procbuf.h', + 'pthread_alloc', + 'pthread_alloc.h', + 'rope', + 'rope.h', + 'ropeimpl.h', + 'set.h', + 'slist', + 'slist.h', + 'stack.h', + 'stdiostream.h', + 'stl_alloc.h', + 'stl_relops.h', + 'streambuf.h', + 'stream.h', + 'strfile.h', + 'strstream.h', + 'tempbuf.h', + 'tree.h', + 'type_traits.h', + 'vector.h', + # 17.6.1.2 C++ library headers + 'algorithm', + 'array', + 'atomic', + 'bitset', + 'chrono', + 'codecvt', + 'complex', + 'condition_variable', + 'deque', + 'exception', + 'forward_list', + 'fstream', + 'functional', + 'future', + 'initializer_list', + 'iomanip', + 'ios', + 'iosfwd', + 'iostream', + 'istream', + 'iterator', + 'limits', + 'list', + 'locale', + 'map', + 'memory', + 'mutex', + 'new', + 'numeric', + 'ostream', + 'queue', + 'random', + 'ratio', + 'regex', + 'set', + 'sstream', + 'stack', + 'stdexcept', + 'streambuf', + 'string', + 'strstream', + 'system_error', + 'thread', + 'tuple', + 'typeindex', + 'typeinfo', + 'type_traits', + 'unordered_map', + 'unordered_set', + 'utility', + 'valarray', + 'vector', + # 17.6.1.2 C++ headers for C library facilities + 'cassert', + 'ccomplex', + 'cctype', + 'cerrno', + 'cfenv', + 'cfloat', + 'cinttypes', + 'ciso646', + 'climits', + 'clocale', + 'cmath', + 'csetjmp', + 'csignal', + 'cstdalign', + 'cstdarg', + 'cstdbool', + 'cstddef', + 'cstdint', + 'cstdio', + 'cstdlib', + 'cstring', + 'ctgmath', + 'ctime', + 'cuchar', + 'cwchar', + 'cwctype', + ]) + +# Assertion macros. These are defined in base/logging.h and +# testing/base/gunit.h. Note that the _M versions need to come first +# for substring matching to work. +_CHECK_MACROS = [ + 'DCHECK', 'CHECK', + 'EXPECT_TRUE_M', 'EXPECT_TRUE', + 'ASSERT_TRUE_M', 'ASSERT_TRUE', + 'EXPECT_FALSE_M', 'EXPECT_FALSE', + 'ASSERT_FALSE_M', 'ASSERT_FALSE', + ] + +# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE +_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) + +for op, replacement in [('==', 'EQ'), ('!=', 'NE'), + ('>=', 'GE'), ('>', 'GT'), + ('<=', 'LE'), ('<', 'LT')]: + _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement + _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement + +for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), + ('>=', 'LT'), ('>', 'LE'), + ('<=', 'GT'), ('<', 'GE')]: + _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement + _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement + +# Alternative tokens and their replacements. For full list, see section 2.5 +# Alternative tokens [lex.digraph] in the C++ standard. 
+# +# Digraphs (such as '%:') are not included here since it's a mess to +# match those on a word boundary. +_ALT_TOKEN_REPLACEMENT = { + 'and': '&&', + 'bitor': '|', + 'or': '||', + 'xor': '^', + 'compl': '~', + 'bitand': '&', + 'and_eq': '&=', + 'or_eq': '|=', + 'xor_eq': '^=', + 'not': '!', + 'not_eq': '!=' + } + +# Compile regular expression that matches all the above keywords. The "[ =()]" +# bit is meant to avoid matching these keywords outside of boolean expressions. +# +# False positives include C-style multi-line comments and multi-line strings +# but those have always been troublesome for cpplint. +_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( + r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') + + +# These constants define types of headers for use with +# _IncludeState.CheckNextIncludeOrder(). +_C_SYS_HEADER = 1 +_CPP_SYS_HEADER = 2 +_LIKELY_MY_HEADER = 3 +_POSSIBLE_MY_HEADER = 4 +_OTHER_HEADER = 5 + +# These constants define the current inline assembly state +_NO_ASM = 0 # Outside of inline assembly block +_INSIDE_ASM = 1 # Inside inline assembly block +_END_ASM = 2 # Last line of inline assembly block +_BLOCK_ASM = 3 # The whole block is an inline assembly block + +# Match start of assembly blocks +_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' + r'(?:\s+(volatile|__volatile__))?' + r'\s*[{(]') + + +_regexp_compile_cache = {} + +# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...). +_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?') + +# {str, set(int)}: a map from error categories to sets of linenumbers +# on which those errors are expected and should be suppressed. +_error_suppressions = {} + +# Finds Copyright. +_RE_COPYRIGHT = re.compile(r'Copyright \d\d\d\d BVLC and contributors.') + +# The root directory used for deriving header guard CPP variable. +# This is set by --root flag. +_root = None + +# The allowed line length of files. +# This is set by --linelength flag. +_line_length = 80 + +# The allowed extensions for file names +# This is set by --extensions flag. +_valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh']) + +def ParseNolintSuppressions(filename, raw_line, linenum, error): + """Updates the global list of error-suppressions. + + Parses any NOLINT comments on the current line, updating the global + error_suppressions store. Reports an error if the NOLINT comment + was malformed. + + Args: + filename: str, the name of the input file. + raw_line: str, the line of input text, with comments. + linenum: int, the number of the current line. + error: function, an error handler. + """ + # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*). + matched = _RE_SUPPRESSION.search(raw_line) + if matched: + if matched.group(1) == '_NEXT_LINE': + linenum += 1 + category = matched.group(2) + if category in (None, '(*)'): # => "suppress all" + _error_suppressions.setdefault(None, set()).add(linenum) + else: + if category.startswith('(') and category.endswith(')'): + category = category[1:-1] + if category in _ERROR_CATEGORIES: + _error_suppressions.setdefault(category, set()).add(linenum) + else: + error(filename, linenum, 'readability/nolint', 5, + 'Unknown NOLINT error category: %s' % category) + + +def ResetNolintSuppressions(): + "Resets the set of NOLINT suppressions to empty." + _error_suppressions.clear() + + +def IsErrorSuppressedByNolint(category, linenum): + """Returns true if the specified error category is suppressed on this line. 
+ + Consults the global error_suppressions map populated by + ParseNolintSuppressions/ResetNolintSuppressions. + + Args: + category: str, the category of the error. + linenum: int, the current line number. + Returns: + bool, True iff the error should be suppressed due to a NOLINT comment. + """ + return (linenum in _error_suppressions.get(category, set()) or + linenum in _error_suppressions.get(None, set())) + +def Match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + # The regexp compilation caching is inlined in both Match and Search for + # performance reasons; factoring it out into a separate function turns out + # to be noticeably expensive. + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) + + +def ReplaceAll(pattern, rep, s): + """Replaces instances of pattern in a string with a replacement. + + The compiled regex is kept in a cache shared by Match and Search. + + Args: + pattern: regex pattern + rep: replacement text + s: search string + + Returns: + string with replacements made (or original string if no replacements) + """ + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].sub(rep, s) + + +def Search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) + + +class _IncludeState(dict): + """Tracks line numbers for includes, and the order in which includes appear. + + As a dict, an _IncludeState object serves as a mapping between include + filename and line number on which that file was included. + + Call CheckNextIncludeOrder() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. + + """ + # self._section will move monotonically through this set. If it ever + # needs to move backwards, CheckNextIncludeOrder will raise an error. + _INITIAL_SECTION = 0 + _MY_H_SECTION = 1 + _C_SECTION = 2 + _CPP_SECTION = 3 + _OTHER_H_SECTION = 4 + + _TYPE_NAMES = { + _C_SYS_HEADER: 'C system header', + _CPP_SYS_HEADER: 'C++ system header', + _LIKELY_MY_HEADER: 'header this file implements', + _POSSIBLE_MY_HEADER: 'header this file may implement', + _OTHER_HEADER: 'other header', + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing. (This can't be an error.)", + _MY_H_SECTION: 'a header this file implements', + _C_SECTION: 'C system header', + _CPP_SECTION: 'C++ system header', + _OTHER_H_SECTION: 'other header', + } + + def __init__(self): + dict.__init__(self) + self.ResetSection() + + def ResetSection(self): + # The name of the current section. + self._section = self._INITIAL_SECTION + # The path of last found header. + self._last_header = '' + + def SetLastHeader(self, header_path): + self._last_header = header_path + + def CanonicalizeAlphabeticalOrder(self, header_path): + """Returns a path canonicalized for alphabetical comparison. + + - replaces "-" with "_" so they both cmp the same. + - removes '-inl' since we don't require them to be after the main header. + - lowercase everything, just in case. + + Args: + header_path: Path to be canonicalized. + + Returns: + Canonicalized path. 
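+
+ Illustrative examples: 'Foo-inl.h' -> 'foo.h'; 'Bar-Baz.h' -> 'bar_baz.h'.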
+ """ + return header_path.replace('-inl.h', '.h').replace('-', '_').lower() + + def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): + """Check if a header is in alphabetical order with the previous header. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + header_path: Canonicalized header to be checked. + + Returns: + Returns true if the header is in alphabetical order. + """ + # If previous section is different from current section, _last_header will + # be reset to empty string, so it's always less than current header. + # + # If previous line was a blank line, assume that the headers are + # intentionally sorted the way they are. + if (self._last_header > header_path and + not Match(r'^\s*$', clean_lines.elided[linenum - 1])): + return False + return True + + def CheckNextIncludeOrder(self, header_type): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. + + """ + error_message = ('Found %s after %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section])) + + last_section = self._section + + if header_type == _C_SYS_HEADER: + if self._section <= self._C_SECTION: + self._section = self._C_SECTION + else: + self._last_header = '' + return error_message + elif header_type == _CPP_SYS_HEADER: + if self._section <= self._CPP_SECTION: + self._section = self._CPP_SECTION + else: + self._last_header = '' + return error_message + elif header_type == _LIKELY_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + self._section = self._OTHER_H_SECTION + elif header_type == _POSSIBLE_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + # This will always be the fallback because we're not sure + # enough that the header is associated with this file. + self._section = self._OTHER_H_SECTION + else: + assert header_type == _OTHER_HEADER + self._section = self._OTHER_H_SECTION + + if last_section != self._section: + self._last_header = '' + + return '' + + +class _CppLintState(object): + """Maintains module-wide state..""" + + def __init__(self): + self.verbose_level = 1 # global setting. + self.error_count = 0 # global count of reported errors + # filters to apply when emitting error messages + self.filters = _DEFAULT_FILTERS[:] + self.counting = 'total' # In what way are we counting errors? + self.errors_by_category = {} # string to int dict storing error counts + + # output format: + # "emacs" - format that emacs can parse (default) + # "vs7" - format that Microsoft Visual Studio 7 can parse + self.output_format = 'emacs' + + def SetOutputFormat(self, output_format): + """Sets the output format for errors.""" + self.output_format = output_format + + def SetVerboseLevel(self, level): + """Sets the module's verbosity, and returns the previous setting.""" + last_verbose_level = self.verbose_level + self.verbose_level = level + return last_verbose_level + + def SetCountingStyle(self, counting_style): + """Sets the module's counting options.""" + self.counting = counting_style + + def SetFilters(self, filters): + """Sets the error-message filters. 
+ + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "+whitespace/indent"). + Each filter should start with + or -; else we die. + + Raises: + ValueError: The comma-separated filters did not all start with '+' or '-'. + E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" + """ + # Default filters always have less priority than the flag ones. + self.filters = _DEFAULT_FILTERS[:] + for filt in filters.split(','): + clean_filt = filt.strip() + if clean_filt: + self.filters.append(clean_filt) + for filt in self.filters: + if not (filt.startswith('+') or filt.startswith('-')): + raise ValueError('Every filter in --filters must start with + or -' + ' (%s does not)' % filt) + + def ResetErrorCounts(self): + """Sets the module's error statistic back to zero.""" + self.error_count = 0 + self.errors_by_category = {} + + def IncrementErrorCount(self, category): + """Bumps the module's error statistic.""" + self.error_count += 1 + if self.counting in ('toplevel', 'detailed'): + if self.counting != 'detailed': + category = category.split('/')[0] + if category not in self.errors_by_category: + self.errors_by_category[category] = 0 + self.errors_by_category[category] += 1 + + def PrintErrorCounts(self): + """Print a summary of errors by category, and the total.""" + for category, count in self.errors_by_category.iteritems(): + sys.stderr.write('Category \'%s\' errors found: %d\n' % + (category, count)) + sys.stderr.write('Total errors found: %d\n' % self.error_count) + +_cpplint_state = _CppLintState() + + +def _OutputFormat(): + """Gets the module's output format.""" + return _cpplint_state.output_format + + +def _SetOutputFormat(output_format): + """Sets the module's output format.""" + _cpplint_state.SetOutputFormat(output_format) + + +def _VerboseLevel(): + """Returns the module's verbosity setting.""" + return _cpplint_state.verbose_level + + +def _SetVerboseLevel(level): + """Sets the module's verbosity, and returns the previous setting.""" + return _cpplint_state.SetVerboseLevel(level) + + +def _SetCountingStyle(level): + """Sets the module's counting options.""" + _cpplint_state.SetCountingStyle(level) + + +def _Filters(): + """Returns the module's list of output filters, as a list.""" + return _cpplint_state.filters + + +def _SetFilters(filters): + """Sets the module's error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "whitespace/indent"). + Each filter should start with + or -; else we die. + """ + _cpplint_state.SetFilters(filters) + + +class _FunctionState(object): + """Tracks current function name and the number of lines in its body.""" + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. + _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self): + self.in_a_function = False + self.lines_in_function = 0 + self.current_function = '' + + def Begin(self, function_name): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + """ + self.in_a_function = True + self.lines_in_function = 0 + self.current_function = function_name + + def Count(self): + """Count line in current function body.""" + if self.in_a_function: + self.lines_in_function += 1 + + def Check(self, error, filename, linenum): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. 
+ filename: The name of the current file. + linenum: The number of the line to check. + """ + if Match(r'T(EST|est)', self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2**_VerboseLevel() + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... + if error_level > 5: + error_level = 5 + error(filename, linenum, 'readability/fn_size', error_level, + 'Small and focused functions are preferred:' + ' %s has %d non-comment lines' + ' (error triggered by exceeding %d lines).' % ( + self.current_function, self.lines_in_function, trigger)) + + def End(self): + """Stop analyzing function body.""" + self.in_a_function = False + + +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" + pass + + +class FileInfo: + """Provides utility functions for filenames. + + FileInfo provides easy access to the components of a file's path + relative to the project root. + """ + + def __init__(self, filename): + self._filename = filename + + def FullName(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace('\\', '/') + + def RepositoryName(self): + """FullName after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. + """ + fullname = self.FullName() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we recursively look + # up the directory tree for the top of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while (root_dir != os.path.dirname(root_dir) and + not os.path.exists(os.path.join(root_dir, ".git")) and + not os.path.exists(os.path.join(root_dir, ".hg")) and + not os.path.exists(os.path.join(root_dir, ".svn"))): + root_dir = os.path.dirname(root_dir) + + if (os.path.exists(os.path.join(root_dir, ".git")) or + os.path.exists(os.path.join(root_dir, ".hg")) or + os.path.exists(os.path.join(root_dir, ".svn"))): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def Split(self): + """Splits the file into the directory, basename, and extension. + + For 'chrome/browser/browser.cc', Split() would + return ('chrome/browser', 'browser', '.cc') + + Returns: + A tuple of (directory, basename, extension). 
+ """ + + googlename = self.RepositoryName() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def BaseName(self): + """File base name - text after the final slash, before the final period.""" + return self.Split()[1] + + def Extension(self): + """File extension - text following the final period.""" + return self.Split()[2] + + def NoExtension(self): + """File has no source file extension.""" + return '/'.join(self.Split()[0:2]) + + def IsSource(self): + """File has a source file extension.""" + return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + + +def _ShouldPrintError(category, confidence, linenum): + """If confidence >= verbose, category passes filter and is not suppressed.""" + + # There are three ways we might decide not to print an error message: + # a "NOLINT(category)" comment appears in the source, + # the verbosity level isn't high enough, or the filters filter it out. + if IsErrorSuppressedByNolint(category, linenum): + return False + if confidence < _cpplint_state.verbose_level: + return False + + is_filtered = False + for one_filter in _Filters(): + if one_filter.startswith('-'): + if category.startswith(one_filter[1:]): + is_filtered = True + elif one_filter.startswith('+'): + if category.startswith(one_filter[1:]): + is_filtered = False + else: + assert False # should have been checked for in SetFilter. + if is_filtered: + return False + + return True + + +def Error(filename, linenum, category, confidence, message): + """Logs the fact we've found a lint error. + + We log where the error was found, and also our confidence in the error, + that is, how certain we are this is a legitimate style regression, and + not a misidentification or a use that's sometimes justified. + + False positives can be suppressed by the use of + "cpplint(category)" comments on the offending line. These are + parsed into _error_suppressions. + + Args: + filename: The name of the file containing the error. + linenum: The number of the line containing the error. + category: A string used to describe the "category" this bug + falls under: "whitespace", say, or "runtime". Categories + may have a hierarchy separated by slashes: "whitespace/indent". + confidence: A number from 1-5 representing a confidence score for + the error, with 5 meaning that we are certain of the problem, + and 1 meaning that it could be a legitimate construct. + message: The error message. + """ + if _ShouldPrintError(category, confidence, linenum): + _cpplint_state.IncrementErrorCount(category) + if _cpplint_state.output_format == 'vs7': + sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + elif _cpplint_state.output_format == 'eclipse': + sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + else: + sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + + +# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( + r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +# Matches strings. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') +# Matches characters. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") +# Matches multi-line C++ comments. 
+# This RE is a little bit more complicated than one might expect, because we +# have to take care of space removals tools so we can handle comments inside +# statements better. +# The current rule is: We only clear spaces from both sides when we're at the +# end of the line. Otherwise, we try to remove spaces from the right side, +# if this doesn't work we try on left side but only if there's a non-character +# on the right. +_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( + r"""(\s*/\*.*\*/\s*$| + /\*.*\*/\s+| + \s+/\*.*\*/(?=\W)| + /\*.*\*/)""", re.VERBOSE) + + +def IsCppString(line): + """Does line terminate so, that the next symbol is in string constant. + + This function does not consider single-line nor multi-line comments. + + Args: + line: is a partial line of code starting from the 0..n. + + Returns: + True, if next character appended to 'line' is inside a + string constant. + """ + + line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" + return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + + +def CleanseRawStrings(raw_lines): + """Removes C++11 raw strings from lines. + + Before: + static const char kData[] = R"( + multi-line string + )"; + + After: + static const char kData[] = "" + (replaced by blank line) + ""; + + Args: + raw_lines: list of raw lines. + + Returns: + list of lines with C++11 raw strings replaced by empty strings. + """ + + delimiter = None + lines_without_raw_strings = [] + for line in raw_lines: + if delimiter: + # Inside a raw string, look for the end + end = line.find(delimiter) + if end >= 0: + # Found the end of the string, match leading space for this + # line and resume copying the original lines, and also insert + # a "" on the last line. + leading_space = Match(r'^(\s*)\S', line) + line = leading_space.group(1) + '""' + line[end + len(delimiter):] + delimiter = None + else: + # Haven't found the end yet, append a blank line. + line = '' + + else: + # Look for beginning of a raw string. + # See 2.14.15 [lex.string] for syntax. + matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) + if matched: + delimiter = ')' + matched.group(2) + '"' + + end = matched.group(3).find(delimiter) + if end >= 0: + # Raw string ended on same line + line = (matched.group(1) + '""' + + matched.group(3)[end + len(delimiter):]) + delimiter = None + else: + # Start of a multi-line raw string + line = matched.group(1) + '""' + + lines_without_raw_strings.append(line) + + # TODO(unknown): if delimiter is not None here, we might want to + # emit a warning for unterminated string. + return lines_without_raw_strings + + +def FindNextMultiLineCommentStart(lines, lineix): + """Find the beginning marker for a multiline comment.""" + while lineix < len(lines): + if lines[lineix].strip().startswith('/*'): + # Only return this marker if the comment goes beyond this line + if lines[lineix].strip().find('*/', 2) < 0: + return lineix + lineix += 1 + return len(lines) + + +def FindNextMultiLineCommentEnd(lines, lineix): + """We are inside a comment, find the end marker.""" + while lineix < len(lines): + if lines[lineix].strip().endswith('*/'): + return lineix + lineix += 1 + return len(lines) + + +def RemoveMultiLineCommentsFromRange(lines, begin, end): + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code. 
+ for i in range(begin, end): + lines[i] = '// dummy' + + +def RemoveMultiLineComments(filename, lines, error): + """Removes multiline (c-style) comments from lines.""" + lineix = 0 + while lineix < len(lines): + lineix_begin = FindNextMultiLineCommentStart(lines, lineix) + if lineix_begin >= len(lines): + return + lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) + if lineix_end >= len(lines): + error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, + 'Could not find end of multi-line comment') + return + RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) + lineix = lineix_end + 1 + + +def CleanseComments(line): + """Removes //-comments and single-line C-style /* */ comments. + + Args: + line: A line of C++ source. + + Returns: + The line with single-line comments removed. + """ + commentpos = line.find('//') + if commentpos != -1 and not IsCppString(line[:commentpos]): + line = line[:commentpos].rstrip() + # get rid of /* ... */ + return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) + + +class CleansedLines(object): + """Holds 3 copies of all lines with different preprocessing applied to them. + + 1) elided member contains lines without strings and comments, + 2) lines member contains lines without comments, and + 3) raw_lines member contains all the lines without processing. + All these three members are of , and of the same length. + """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self.num_lines = len(lines) + self.lines_without_raw_strings = CleanseRawStrings(lines) + for linenum in range(len(self.lines_without_raw_strings)): + self.lines.append(CleanseComments( + self.lines_without_raw_strings[linenum])) + elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) + self.elided.append(CleanseComments(elided)) + + def NumLines(self): + """Returns the number of lines represented.""" + return self.num_lines + + @staticmethod + def _CollapseStrings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if not _RE_PATTERN_INCLUDE.match(elided): + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) + elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) + elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) + return elided + + +def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): + """Find the position just after the matching endchar. + + Args: + line: a CleansedLines line. + startpos: start searching at this position. + depth: nesting level at startpos. + startchar: expression opening character. + endchar: expression closing character. + + Returns: + On finding matching endchar: (index just after matching endchar, 0) + Otherwise: (-1, new depth at end of this line) + """ + for i in xrange(startpos, len(line)): + if line[i] == startchar: + depth += 1 + elif line[i] == endchar: + depth -= 1 + if depth == 0: + return (i + 1, 0) + return (-1, depth) + + +def CloseExpression(clean_lines, linenum, pos): + """If input points to ( or { or [ or <, finds the position that closes it. 
+ + If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the + linenum/pos that correspond to the closing of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + pos: A position on the line. + + Returns: + A tuple (line, linenum, pos) pointer *past* the closing brace, or + (line, len(lines), -1) if we never find a close. Note we ignore + strings and comments when matching; and the line we return is the + 'cleansed' line at linenum. + """ + + line = clean_lines.elided[linenum] + startchar = line[pos] + if startchar not in '({[<': + return (line, clean_lines.NumLines(), -1) + if startchar == '(': endchar = ')' + if startchar == '[': endchar = ']' + if startchar == '{': endchar = '}' + if startchar == '<': endchar = '>' + + # Check first line + (end_pos, num_open) = FindEndOfExpressionInLine( + line, pos, 0, startchar, endchar) + if end_pos > -1: + return (line, linenum, end_pos) + + # Continue scanning forward + while linenum < clean_lines.NumLines() - 1: + linenum += 1 + line = clean_lines.elided[linenum] + (end_pos, num_open) = FindEndOfExpressionInLine( + line, 0, num_open, startchar, endchar) + if end_pos > -1: + return (line, linenum, end_pos) + + # Did not find endchar before end of file, give up + return (line, clean_lines.NumLines(), -1) + + +def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar): + """Find position at the matching startchar. + + This is almost the reverse of FindEndOfExpressionInLine, but note + that the input position and returned position differs by 1. + + Args: + line: a CleansedLines line. + endpos: start searching at this position. + depth: nesting level at endpos. + startchar: expression opening character. + endchar: expression closing character. + + Returns: + On finding matching startchar: (index at matching startchar, 0) + Otherwise: (-1, new depth at beginning of this line) + """ + for i in xrange(endpos, -1, -1): + if line[i] == endchar: + depth += 1 + elif line[i] == startchar: + depth -= 1 + if depth == 0: + return (i, 0) + return (-1, depth) + + +def ReverseCloseExpression(clean_lines, linenum, pos): + """If input points to ) or } or ] or >, finds the position that opens it. + + If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the + linenum/pos that correspond to the opening of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + pos: A position on the line. + + Returns: + A tuple (line, linenum, pos) pointer *at* the opening brace, or + (line, 0, -1) if we never find the matching opening brace. Note + we ignore strings and comments when matching; and the line we + return is the 'cleansed' line at linenum. 
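+
+ Illustrative example: with pos at the ')' in "Foo(bar)", the returned
+ position is that of the matching '('.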
+ """ + line = clean_lines.elided[linenum] + endchar = line[pos] + if endchar not in ')}]>': + return (line, 0, -1) + if endchar == ')': startchar = '(' + if endchar == ']': startchar = '[' + if endchar == '}': startchar = '{' + if endchar == '>': startchar = '<' + + # Check last line + (start_pos, num_open) = FindStartOfExpressionInLine( + line, pos, 0, startchar, endchar) + if start_pos > -1: + return (line, linenum, start_pos) + + # Continue scanning backward + while linenum > 0: + linenum -= 1 + line = clean_lines.elided[linenum] + (start_pos, num_open) = FindStartOfExpressionInLine( + line, len(line) - 1, num_open, startchar, endchar) + if start_pos > -1: + return (line, linenum, start_pos) + + # Did not find startchar before beginning of file, give up + return (line, 0, -1) + + +def CheckForCopyright(filename, lines, error): + """Logs an error if no Copyright message appears at the top of the file.""" + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in xrange(1, min(len(lines), 11)): + if _RE_COPYRIGHT.search(lines[line], re.I): break + else: # means no copyright line was found + error(filename, 0, 'legal/copyright', 5, + 'BVLC copyright message not found. ' + 'You should have a line: "Copyright [year] BVLC and contributors."') + + +def GetHeaderGuardCPPVariable(filename): + """Returns the CPP variable that should be used as a header guard. + + Args: + filename: The name of a C++ header file. + + Returns: + The CPP variable that should be used as a header guard in the + named file. + + """ + + # Restores original filename in case that cpplint is invoked from Emacs's + # flymake. + filename = re.sub(r'_flymake\.h$', '.h', filename) + filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) + + fileinfo = FileInfo(filename) + file_path_from_root = fileinfo.RepositoryName() + if _root: + file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) + return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' + + +def CheckForHeaderGuard(filename, lines, error): + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + cppvar = GetHeaderGuardCPPVariable(filename) + + ifndef = None + ifndef_linenum = 0 + define = None + endif = None + endif_linenum = 0 + for linenum, line in enumerate(lines): + linesplit = line.split() + if len(linesplit) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and linesplit[0] == '#ifndef': + # set ifndef to the header guard presented on the #ifndef line. + ifndef = linesplit[1] + ifndef_linenum = linenum + if not define and linesplit[0] == '#define': + define = linesplit[1] + # find the last occurrence of #endif, save entire line + if line.startswith('#endif'): + endif = line + endif_linenum = linenum + + if not ifndef: + error(filename, 0, 'build/header_guard', 5, + 'No #ifndef header guard found, suggested CPP variable is: %s' % + cppvar) + return + + if not define: + error(filename, 0, 'build/header_guard', 5, + 'No #define header guard found, suggested CPP variable is: %s' % + cppvar) + return + + # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ + # for backward compatibility. 
+ if ifndef != cppvar: + error_level = 0 + if ifndef != cppvar + '_': + error_level = 5 + + ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, + error) + error(filename, ifndef_linenum, 'build/header_guard', error_level, + '#ifndef header guard has wrong style, please use: %s' % cppvar) + + if define != ifndef: + error(filename, 0, 'build/header_guard', 5, + '#ifndef and #define don\'t match, suggested CPP variable is: %s' % + cppvar) + return + + if endif != ('#endif // %s' % cppvar): + error_level = 0 + if endif != ('#endif // %s' % (cppvar + '_')): + error_level = 5 + + ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, + error) + error(filename, endif_linenum, 'build/header_guard', error_level, + '#endif line should be "#endif // %s"' % cppvar) + + +def CheckForBadCharacters(filename, lines, error): + """Logs an error for each line containing bad characters. + + Two kinds of bad characters: + + 1. Unicode replacement characters: These indicate that either the file + contained invalid UTF-8 (likely) or Unicode replacement characters (which + it shouldn't). Note that it's possible for this to throw off line + numbering if the invalid UTF-8 occurred adjacent to a newline. + + 2. NUL bytes. These are problematic for some tools. + + Args: + filename: The name of the current file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + for linenum, line in enumerate(lines): + if u'\ufffd' in line: + error(filename, linenum, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + if '\0' in line: + error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') + + +def CheckForNewlineAtEOF(filename, lines, error): + """Logs an error if there is no newline char at the end of the file. + + Args: + filename: The name of the current file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, + 'Could not find a newline character at the end of the file.') + + +def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. Likewise, it's ok for strings to extend across multiple + lines, as long as a line continuation character (backslash) + terminates each line. Although not currently prohibited by the C++ + style guide, it's ugly and unnecessary. We don't do well with either + in this lint program, so we warn about both. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Remove all \\ (escaped backslashes) from the line. They are OK, and the + # second (escaped) slash may trigger later \" detection erroneously. 
+ line = line.replace('\\\\', '') + + if line.count('/*') > line.count('*/'): + error(filename, linenum, 'readability/multiline_comment', 5, + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. ' + 'Consider replacing these with //-style comments, ' + 'with #if 0...#endif, ' + 'or with more clearly structured multi-line comments.') + + if (line.count('"') - line.count('\\"')) % 2: + error(filename, linenum, 'readability/multiline_string', 5, + 'Multi-line string ("...") found. This lint script doesn\'t ' + 'do well with such strings, and may give bogus warnings. ' + 'Use C++11 raw strings or concatenation instead.') + + +c_random_function_list = ( + 'rand(', + 'rand_r(', + 'random(', + ) + +def CheckCaffeRandom(filename, clean_lines, linenum, error): + """Checks for calls to C random functions (rand, rand_r, random, ...). + + Caffe code should (almost) always use the caffe_rng_* functions rather + than these, as the internal state of these C functions is independent of the + native Caffe RNG system which should produce deterministic results for a + fixed Caffe seed set using Caffe::set_random_seed(...). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + for function in c_random_function_list: + ix = line.find(function) + # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison + if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and + line[ix - 1] not in ('_', '.', '>'))): + error(filename, linenum, 'caffe/random_fn', 2, + 'Use caffe_rng_rand() (or other caffe_rng_* function) instead of ' + + function + + ') to ensure results are deterministic for a fixed Caffe seed.') + + +threading_list = ( + ('asctime(', 'asctime_r('), + ('ctime(', 'ctime_r('), + ('getgrgid(', 'getgrgid_r('), + ('getgrnam(', 'getgrnam_r('), + ('getlogin(', 'getlogin_r('), + ('getpwnam(', 'getpwnam_r('), + ('getpwuid(', 'getpwuid_r('), + ('gmtime(', 'gmtime_r('), + ('localtime(', 'localtime_r('), + ('strtok(', 'strtok_r('), + ('ttyname(', 'ttyname_r('), + ) + + +def CheckPosixThreading(filename, clean_lines, linenum, error): + """Checks for calls to thread-unsafe functions. + + Much code has been originally written without consideration of + multi-threading. Also, engineers are relying on their old experience; + they have learned posix before threading extensions were added. These + tests guide the engineers to use thread-safe functions (when using + posix directly). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + for single_thread_function, multithread_safe_function in threading_list: + ix = line.find(single_thread_function) + # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison + if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and + line[ix - 1] not in ('_', '.', '>'))): + error(filename, linenum, 'runtime/threadsafe_fn', 2, + 'Consider using ' + multithread_safe_function + + '...) instead of ' + single_thread_function + + '...) for improved thread safety.') + + +def CheckVlogArguments(filename, clean_lines, linenum, error): + """Checks that VLOG() is only used for defining a logging level. 
+ + For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and + VLOG(FATAL) are not. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): + error(filename, linenum, 'runtime/vlog', 5, + 'VLOG() should be used with numeric verbosity level. ' + 'Use LOG() if you want symbolic severity levels.') + + +# Matches invalid increment: *count++, which moves pointer instead of +# incrementing a value. +_RE_PATTERN_INVALID_INCREMENT = re.compile( + r'^\s*\*\w+(\+\+|--);') + + +def CheckInvalidIncrement(filename, clean_lines, linenum, error): + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error(filename, linenum, 'runtime/invalid_increment', 5, + 'Changing pointer instead of value (or unused value of operator*).') + + +class _BlockInfo(object): + """Stores information about a generic block of code.""" + + def __init__(self, seen_open_brace): + self.seen_open_brace = seen_open_brace + self.open_parentheses = 0 + self.inline_asm = _NO_ASM + + def CheckBegin(self, filename, clean_lines, linenum, error): + """Run checks that applies to text up to the opening brace. + + This is mostly for checking the text after the class identifier + and the "{", usually where the base class is specified. For other + blocks, there isn't much to check, so we always pass. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass + + def CheckEnd(self, filename, clean_lines, linenum, error): + """Run checks that applies to text after the closing brace. + + This is mostly used for checking end of namespace comments. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass + + +class _ClassInfo(_BlockInfo): + """Stores information about a class.""" + + def __init__(self, name, class_or_struct, clean_lines, linenum): + _BlockInfo.__init__(self, False) + self.name = name + self.starting_linenum = linenum + self.is_derived = False + if class_or_struct == 'struct': + self.access = 'public' + self.is_struct = True + else: + self.access = 'private' + self.is_struct = False + + # Remember initial indentation level for this class. Using raw_lines here + # instead of elided to account for leading comments. + initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum]) + if initial_indent: + self.class_indent = len(initial_indent.group(1)) + else: + self.class_indent = 0 + + # Try to find the end of the class. This will be confused by things like: + # class A { + # } *x = { ... 
+ # + # But it's still good enough for CheckSectionSpacing. + self.last_line = 0 + depth = 0 + for i in range(linenum, clean_lines.NumLines()): + line = clean_lines.elided[i] + depth += line.count('{') - line.count('}') + if not depth: + self.last_line = i + break + + def CheckBegin(self, filename, clean_lines, linenum, error): + # Look for a bare ':' + if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): + self.is_derived = True + + def CheckEnd(self, filename, clean_lines, linenum, error): + # Check that closing brace is aligned with beginning of the class. + # Only do this if the closing brace is indented by only whitespaces. + # This means we will not check single-line class definitions. + indent = Match(r'^( *)\}', clean_lines.elided[linenum]) + if indent and len(indent.group(1)) != self.class_indent: + if self.is_struct: + parent = 'struct ' + self.name + else: + parent = 'class ' + self.name + error(filename, linenum, 'whitespace/indent', 3, + 'Closing brace should be aligned with beginning of %s' % parent) + + +class _NamespaceInfo(_BlockInfo): + """Stores information about a namespace.""" + + def __init__(self, name, linenum): + _BlockInfo.__init__(self, False) + self.name = name or '' + self.starting_linenum = linenum + + def CheckEnd(self, filename, clean_lines, linenum, error): + """Check end of namespace comments.""" + line = clean_lines.raw_lines[linenum] + + # Check how many lines is enclosed in this namespace. Don't issue + # warning for missing namespace comments if there aren't enough + # lines. However, do apply checks if there is already an end of + # namespace comment and it's incorrect. + # + # TODO(unknown): We always want to check end of namespace comments + # if a namespace is large, but sometimes we also want to apply the + # check if a short namespace contained nontrivial things (something + # other than forward declarations). There is currently no logic on + # deciding what these nontrivial things are, so this check is + # triggered by namespace size only, which works most of the time. + if (linenum - self.starting_linenum < 10 + and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): + return + + # Look for matching comment at end of namespace. + # + # Note that we accept C style "/* */" comments for terminating + # namespaces, so that code that terminate namespaces inside + # preprocessor macros can be cpplint clean. + # + # We also accept stuff like "// end of namespace ." with the + # period at the end. + # + # Besides these, we don't accept anything else, otherwise we might + # get false negatives when existing comment is a substring of the + # expected namespace. 
+ if self.name: + # Named namespace + if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + + r'[\*/\.\\\s]*$'), + line): + error(filename, linenum, 'readability/namespace', 5, + 'Namespace should be terminated with "// namespace %s"' % + self.name) + else: + # Anonymous namespace + if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): + error(filename, linenum, 'readability/namespace', 5, + 'Namespace should be terminated with "// namespace"') + + +class _PreprocessorInfo(object): + """Stores checkpoints of nesting stacks when #if/#else is seen.""" + + def __init__(self, stack_before_if): + # The entire nesting stack before #if + self.stack_before_if = stack_before_if + + # The entire nesting stack up to #else + self.stack_before_else = [] + + # Whether we have already seen #else or #elif + self.seen_else = False + + +class _NestingState(object): + """Holds states related to parsing braces.""" + + def __init__(self): + # Stack for tracking all braces. An object is pushed whenever we + # see a "{", and popped when we see a "}". Only 3 types of + # objects are possible: + # - _ClassInfo: a class or struct. + # - _NamespaceInfo: a namespace. + # - _BlockInfo: some other type of block. + self.stack = [] + + # Stack of _PreprocessorInfo objects. + self.pp_stack = [] + + def SeenOpenBrace(self): + """Check if we have seen the opening brace for the innermost block. + + Returns: + True if we have seen the opening brace, False if the innermost + block is still expecting an opening brace. + """ + return (not self.stack) or self.stack[-1].seen_open_brace + + def InNamespaceBody(self): + """Check if we are currently one level inside a namespace body. + + Returns: + True if top of the stack is a namespace block, False otherwise. + """ + return self.stack and isinstance(self.stack[-1], _NamespaceInfo) + + def UpdatePreprocessor(self, line): + """Update preprocessor stack. + + We need to handle preprocessors due to classes like this: + #ifdef SWIG + struct ResultDetailsPageElementExtensionPoint { + #else + struct ResultDetailsPageElementExtensionPoint : public Extension { + #endif + + We make the following assumptions (good enough for most files): + - Preprocessor condition evaluates to true from #if up to first + #else/#elif/#endif. + + - Preprocessor condition evaluates to false from #else/#elif up + to #endif. We still perform lint checks on these lines, but + these do not affect nesting stack. + + Args: + line: current line to check. + """ + if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): + # Beginning of #if block, save the nesting stack here. The saved + # stack will allow us to restore the parsing state in the #else case. + self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) + elif Match(r'^\s*#\s*(else|elif)\b', line): + # Beginning of #else block + if self.pp_stack: + if not self.pp_stack[-1].seen_else: + # This is the first #else or #elif block. Remember the + # whole nesting stack up to this point. This is what we + # keep after the #endif. + self.pp_stack[-1].seen_else = True + self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) + + # Restore the stack to how it was before the #if + self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) + else: + # TODO(unknown): unexpected #else, issue warning? + pass + elif Match(r'^\s*#\s*endif\b', line): + # End of #if or #else blocks. 
+ if self.pp_stack: + # If we saw an #else, we will need to restore the nesting + # stack to its former state before the #else, otherwise we + # will just continue from where we left off. + if self.pp_stack[-1].seen_else: + # Here we can just use a shallow copy since we are the last + # reference to it. + self.stack = self.pp_stack[-1].stack_before_else + # Drop the corresponding #if + self.pp_stack.pop() + else: + # TODO(unknown): unexpected #endif, issue warning? + pass + + def Update(self, filename, clean_lines, linenum, error): + """Update nesting state with current line. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Update pp_stack first + self.UpdatePreprocessor(line) + + # Count parentheses. This is to avoid adding struct arguments to + # the nesting stack. + if self.stack: + inner_block = self.stack[-1] + depth_change = line.count('(') - line.count(')') + inner_block.open_parentheses += depth_change + + # Also check if we are starting or ending an inline assembly block. + if inner_block.inline_asm in (_NO_ASM, _END_ASM): + if (depth_change != 0 and + inner_block.open_parentheses == 1 and + _MATCH_ASM.match(line)): + # Enter assembly block + inner_block.inline_asm = _INSIDE_ASM + else: + # Not entering assembly block. If previous line was _END_ASM, + # we will now shift to _NO_ASM state. + inner_block.inline_asm = _NO_ASM + elif (inner_block.inline_asm == _INSIDE_ASM and + inner_block.open_parentheses == 0): + # Exit assembly block + inner_block.inline_asm = _END_ASM + + # Consume namespace declaration at the beginning of the line. Do + # this in a loop so that we catch same line declarations like this: + # namespace proto2 { namespace bridge { class MessageSet; } } + while True: + # Match start of namespace. The "\b\s*" below catches namespace + # declarations even if it weren't followed by a whitespace, this + # is so that we don't confuse our namespace checker. The + # missing spaces will be flagged by CheckSpacing. + namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) + if not namespace_decl_match: + break + + new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) + self.stack.append(new_namespace) + + line = namespace_decl_match.group(2) + if line.find('{') != -1: + new_namespace.seen_open_brace = True + line = line[line.find('{') + 1:] + + # Look for a class declaration in whatever is left of the line + # after parsing namespaces. The regexp accounts for decorated classes + # such as in: + # class LOCKABLE API Object { + # }; + # + # Templates with class arguments may confuse the parser, for example: + # template <class T + # class Comparator = less<T>, + # class Vector = vector<T> > + # class HeapQueue { + # + # Because this parser has no nesting state about templates, by the + # time it saw "class Comparator", it may think that it's a new class. + # Nested templates have a similar problem: + # template < + # typename ExportedType, + # typename TupleType, + # template <typename, typename> class ImplTemplate> + # + # To avoid these cases, we ignore classes that are followed by '=' or '>' + class_decl_match = Match( + r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
+ r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' + r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) + if (class_decl_match and + (not self.stack or self.stack[-1].open_parentheses == 0)): + self.stack.append(_ClassInfo( + class_decl_match.group(4), class_decl_match.group(2), + clean_lines, linenum)) + line = class_decl_match.group(5) + + # If we have not yet seen the opening brace for the innermost block, + # run checks here. + if not self.SeenOpenBrace(): + self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) + + # Update access control if we are inside a class/struct + if self.stack and isinstance(self.stack[-1], _ClassInfo): + classinfo = self.stack[-1] + access_match = Match( + r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' + r':(?:[^:]|$)', + line) + if access_match: + classinfo.access = access_match.group(2) + + # Check that access keywords are indented +1 space. Skip this + # check if the keywords are not preceded by whitespaces. + indent = access_match.group(1) + if (len(indent) != classinfo.class_indent + 1 and + Match(r'^\s*$', indent)): + if classinfo.is_struct: + parent = 'struct ' + classinfo.name + else: + parent = 'class ' + classinfo.name + slots = '' + if access_match.group(3): + slots = access_match.group(3) + error(filename, linenum, 'whitespace/indent', 3, + '%s%s: should be indented +1 space inside %s' % ( + access_match.group(2), slots, parent)) + + # Consume braces or semicolons from what's left of the line + while True: + # Match first brace, semicolon, or closed parenthesis. + matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) + if not matched: + break + + token = matched.group(1) + if token == '{': + # If namespace or class hasn't seen a opening brace yet, mark + # namespace/class head as complete. Push a new block onto the + # stack otherwise. + if not self.SeenOpenBrace(): + self.stack[-1].seen_open_brace = True + else: + self.stack.append(_BlockInfo(True)) + if _MATCH_ASM.match(line): + self.stack[-1].inline_asm = _BLOCK_ASM + elif token == ';' or token == ')': + # If we haven't seen an opening brace yet, but we already saw + # a semicolon, this is probably a forward declaration. Pop + # the stack for these. + # + # Similarly, if we haven't seen an opening brace yet, but we + # already saw a closing parenthesis, then these are probably + # function arguments with extra "class" or "struct" keywords. + # Also pop these stack for these. + if not self.SeenOpenBrace(): + self.stack.pop() + else: # token == '}' + # Perform end of block checks and pop the stack. + if self.stack: + self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) + self.stack.pop() + line = matched.group(2) + + def InnermostClass(self): + """Get class info on the top of the stack. + + Returns: + A _ClassInfo object if we are inside a class, or None otherwise. + """ + for i in range(len(self.stack), 0, -1): + classinfo = self.stack[i - 1] + if isinstance(classinfo, _ClassInfo): + return classinfo + return None + + def CheckCompletedBlocks(self, filename, error): + """Checks that all classes and namespaces have been completely parsed. + + Call this when all lines in a file have been processed. + Args: + filename: The name of the current file. + error: The function to call with any errors found. + """ + # Note: This test can result in false positives if #ifdef constructs + # get in the way of brace matching. See the testBuildClass test in + # cpplint_unittest.py for an example of this. 
+ for obj in self.stack: + if isinstance(obj, _ClassInfo): + error(filename, obj.starting_linenum, 'build/class', 5, + 'Failed to find complete declaration of class %s' % + obj.name) + elif isinstance(obj, _NamespaceInfo): + error(filename, obj.starting_linenum, 'build/namespaces', 5, + 'Failed to find complete declaration of namespace %s' % + obj.name) + + +def CheckForNonStandardConstructs(filename, clean_lines, linenum, + nesting_state, error): + r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. + + Complain about several constructs which gcc-2 accepts, but which are + not standard C++. Warning about these in lint is one way to ease the + transition to new compilers. + - put storage class first (e.g. "static const" instead of "const static"). + - "%lld" instead of "%qd" in printf-type functions. + - "%1$d" is non-standard in printf-type functions. + - "\%" is an undefined character escape sequence. + - text after #endif is not allowed. + - invalid inner-style forward declaration. + - >? and <? operators, and their >?= and <?= cousins. + + Additionally, check for constructor/destructor style violations and reference + members, as it is very convenient to do so while checking for + gcc-2 compliance. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + """ + + # Remove comments from the line, but leave in strings for now. + line = clean_lines.lines[linenum] + + if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): + error(filename, linenum, 'runtime/printf_format', 3, + '%q in format strings is deprecated. Use %ll instead.') + + if Search(r'printf\s*\(.*".*%\d+\$', line): + error(filename, linenum, 'runtime/printf_format', 2, + '%N$ formats are unconventional. Try rewriting to avoid them.') + + # Remove escaped backslashes before looking for undefined escape sequences. + line = line.replace('\\\\', '') + + if Search(r'("|\').*\\(%|\[|\(|{)', line): + error(filename, linenum, 'build/printf_format', 3, + '%, [, (, and { are undefined character escapes. Unescape them.') + + # For the rest, work with both comments and strings removed. + line = clean_lines.elided[linenum] + + if Search(r'\b(const|volatile|void|char|short|int|long' + r'|float|double|signed|unsigned' + r'|schar|u?int8|u?int16|u?int32|u?int64)' + r'\s+(register|static|extern|typedef)\b', + line): + error(filename, linenum, 'build/storage_class', 5, + 'Storage class (static, extern, typedef, etc) should be first.') + + if Match(r'\s*#\s*endif\s*[^/\s]+', line): + error(filename, linenum, 'build/endif_comment', 5, + 'Uncommented text after #endif is non-standard. Use a comment.') + + if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): + error(filename, linenum, 'build/forward_decl', 5, + 'Inner-style forward declarations are invalid. Remove this line.') + + if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', + line): + error(filename, linenum, 'build/deprecated', 3, + '>? and <? (max and min) operators are non-standard and deprecated.') + + if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): + # TODO(unknown): Could it be expanded safely to arbitrary references, + # without triggering too many false positives? The first + # attempt triggered 5 warnings for mostly benign code in the regtest, hence + # the restriction. + # Here's the original regexp, for the reference: + # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' + # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' + error(filename, linenum, 'runtime/member_string_references', 2, + 'const string& members are dangerous. It is much better to use ' + 'alternatives, such as pointers or simple constants.') + + # Everything else in this function operates on class declarations. + # Return early if the top of the nesting stack is not a class, or if + # the class head is not completed yet. + classinfo = nesting_state.InnermostClass() + if not classinfo or not classinfo.seen_open_brace: + return + + # The class may have been declared with namespace or classname qualifiers. + # The constructor and destructor will not have those qualifiers. + base_classname = classinfo.name.split('::')[-1] + + # Look for single-argument constructors that aren't marked explicit. + # Technically a valid construct, but against style. + args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)' + % re.escape(base_classname), + line) + if (args and + args.group(1) != 'void' and + not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&' + % re.escape(base_classname), args.group(1).strip())): + error(filename, linenum, 'runtime/explicit', 5, + 'Single-argument constructors should be marked explicit.') + + +def CheckSpacingForFunctionCall(filename, line, linenum, error): + """Checks for the correctness of various spacing around function calls. + + Args: + filename: The name of the current file. + line: The text of the line to check. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Since function calls often occur inside if/for/while/switch + # expressions - which have their own, more liberal conventions - we + # first see if we should be looking inside such an expression for a + # function call, to which we can apply more strict standards. + fncall = line # if there's no control flow construct, look at whole line + for pattern in (r'\bif\s*\((.*)\)\s*{', + r'\bfor\s*\((.*)\)\s*{', + r'\bwhile\s*\((.*)\)\s*[{;]', + r'\bswitch\s*\((.*)\)\s*{'): + match = Search(pattern, line) + if match: + fncall = match.group(1) # look inside the parens for function calls + break + + # Except in if/for/while/switch, there should never be space + # immediately inside parens (eg "f( 3, 4 )"). We make an exception + # for nested parens ( (a+b) + c ). Likewise, there should never be + # a space before a ( when it's a function argument.
I assume it's a + # function argument when the char before the whitespace is legal in + # a function name (alnum + _) and we're not starting a macro. Also ignore + # pointers and references to arrays and functions coz they're too tricky: + # we use a very simple way to recognize these: + # " (something)(maybe-something)" or + # " (something)(maybe-something," or + # " (something)[something]" + # Note that we assume the contents of [] to be short enough that + # they'll never need to wrap. + if ( # Ignore control structures. + not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', + fncall) and + # Ignore pointers/references to functions. + not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and + # Ignore pointers/references to arrays. + not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): + if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call + error(filename, linenum, 'whitespace/parens', 4, + 'Extra space after ( in function call') + elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): + error(filename, linenum, 'whitespace/parens', 2, + 'Extra space after (') + if (Search(r'\w\s+\(', fncall) and + not Search(r'#\s*define|typedef', fncall) and + not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)): + error(filename, linenum, 'whitespace/parens', 4, + 'Extra space before ( in function call') + # If the ) is followed only by a newline or a { + newline, assume it's + # part of a control statement (if/while/etc), and don't complain + if Search(r'[^)]\s+\)\s*[^{\s]', fncall): + # If the closing parenthesis is preceded by only whitespaces, + # try to give a more descriptive error message. + if Search(r'^\s+\)', fncall): + error(filename, linenum, 'whitespace/parens', 2, + 'Closing ) should be moved to the previous line') + else: + error(filename, linenum, 'whitespace/parens', 2, + 'Extra space before )') + + +def IsBlankLine(line): + """Returns true if the given line is blank. + + We consider a line to be blank if the line is empty or consists of + only white spaces. + + Args: + line: A line of a string. + + Returns: + True, if the given line is blank. + """ + return not line or line.isspace() + + +def CheckForFunctionLengths(filename, clean_lines, linenum, + function_state, error): + """Reports for long function bodies. + + For an overview why this is done, see: + http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions + + Uses a simplistic algorithm assuming other style guidelines + (especially spacing) are followed. + Only checks unindented functions, so class members are unchecked. + Trivial bodies are unchecked, so constructors with huge initializer lists + may be missed. + Blank/comment lines are not counted so as to avoid encouraging the removal + of vertical space and comments just to get through a lint check. + NOLINT *on the last line of a function* disables this check. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + function_state: Current function name and lines in body so far. + error: The function to call with any errors found. + """ + lines = clean_lines.lines + line = lines[linenum] + raw = clean_lines.raw_lines + raw_line = raw[linenum] + joined_line = '' + + starting_func = False + regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... + match_result = Match(regexp, line) + if match_result: + # If the name is all caps and underscores, figure it's a macro and + # ignore it, unless it's TEST or TEST_F. 
+ function_name = match_result.group(1).split()[-1] + if function_name == 'TEST' or function_name == 'TEST_F' or ( + not Match(r'[A-Z_]+$', function_name)): + starting_func = True + + if starting_func: + body_found = False + for start_linenum in xrange(linenum, clean_lines.NumLines()): + start_line = lines[start_linenum] + joined_line += ' ' + start_line.lstrip() + if Search(r'(;|})', start_line): # Declarations and trivial functions + body_found = True + break # ... ignore + elif Search(r'{', start_line): + body_found = True + function = Search(r'((\w|:)*)\(', line).group(1) + if Match(r'TEST', function): # Handle TEST... macros + parameter_regexp = Search(r'(\(.*\))', joined_line) + if parameter_regexp: # Ignore bad syntax + function += parameter_regexp.group(1) + else: + function += '()' + function_state.Begin(function) + break + if not body_found: + # No body for the function (or evidence of a non-function) was found. + error(filename, linenum, 'readability/fn_size', 5, + 'Lint failed to find start of function body.') + elif Match(r'^\}\s*$', line): # function end + function_state.Check(error, filename, linenum) + function_state.End() + elif not Match(r'^\s*$', line): + function_state.Count() # Count non-blank/non-comment lines. + + +_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') + + +def CheckComment(comment, filename, linenum, error): + """Checks for common mistakes in TODO comments. + + Args: + comment: The text of the comment from the line in question. + filename: The name of the current file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + match = _RE_PATTERN_TODO.match(comment) + if match: + # One whitespace is correct; zero whitespace is handled elsewhere. + leading_whitespace = match.group(1) + if len(leading_whitespace) > 1: + error(filename, linenum, 'whitespace/todo', 2, + 'Too many spaces before TODO') + + username = match.group(2) + if not username: + error(filename, linenum, 'readability/todo', 2, + 'Missing username in TODO; it should look like ' + '"// TODO(my_username): Stuff."') + + middle_whitespace = match.group(3) + # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison + if middle_whitespace != ' ' and middle_whitespace != '': + error(filename, linenum, 'whitespace/todo', 2, + 'TODO(my_username) should be followed by a space') + +def CheckAccess(filename, clean_lines, linenum, nesting_state, error): + """Checks for improper use of DISALLOW* macros. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] # get rid of comments and strings + + matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' + r'DISALLOW_EVIL_CONSTRUCTORS|' + r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) + if not matched: + return + if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): + if nesting_state.stack[-1].access != 'private': + error(filename, linenum, 'readability/constructors', 3, + '%s must be in the private: section' % matched.group(1)) + + else: + # Found DISALLOW* macro outside a class declaration, or perhaps it + # was used inside a function when it should have been part of the + # class declaration. 
We could issue a warning here, but it + # probably resulted in a compiler error already. + pass + + +def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix): + """Find the corresponding > to close a template. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: Current line number. + init_suffix: Remainder of the current line after the initial <. + + Returns: + True if a matching bracket exists. + """ + line = init_suffix + nesting_stack = ['<'] + while True: + # Find the next operator that can tell us whether < is used as an + # opening bracket or as a less-than operator. We only want to + # warn on the latter case. + # + # We could also check all other operators and terminate the search + # early, e.g. if we got something like this "a<b+c", the "<" is + # most likely a less-than operator and not an opening bracket. + match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line) + if match: + # Found an operator, update nesting stack + operator = match.group(1) + line = match.group(2) + + if nesting_stack[-1] == '<': + # Expecting closing angle bracket + if operator in ('<', '(', '['): + nesting_stack.append(operator) + elif operator == '>': + nesting_stack.pop() + if not nesting_stack: + # Found matching angle bracket + return True + elif operator == ',': + # Got a comma after a bracket, this is most likely a template + # argument. We have not seen a closing angle bracket yet, but + # it's probably a few lines later if we look for it, so just + # return early here. + return True + else: + # Got some other operator. + return False + + else: + # Expecting closing parenthesis or closing bracket + if operator in ('<', '(', '['): + nesting_stack.append(operator) + elif operator in (')', ']'): + # We don't bother checking for matching () or []. If we got + # something like (] or [), it would have been a syntax error. + nesting_stack.pop() + + else: + # Scan the next line + linenum += 1 + if linenum >= len(clean_lines.elided): + break + line = clean_lines.elided[linenum] + + # Exhausted all remaining lines and still no matching angle bracket. + # Most likely the input was incomplete, otherwise we should have + # seen a semicolon and returned early. + return True + + +def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): + """Find the corresponding < that started a template. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: Current line number. + init_prefix: Part of the current line before the initial >. + + Returns: + True if a matching bracket exists. + """ + line = init_prefix + nesting_stack = ['>'] + while True: + # Find the previous operator + match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line) + if match: + # Found an operator, update nesting stack + operator = match.group(2) + line = match.group(1) + + if nesting_stack[-1] == '>': + # Expecting opening angle bracket + if operator in ('>', ')', ']'): + nesting_stack.append(operator) + elif operator == '<': + nesting_stack.pop() + if not nesting_stack: + # Found matching angle bracket + return True + elif operator == ',': + # Got a comma before a bracket, this is most likely a + # template argument. The opening angle bracket is probably + # there if we look for it, so just return early here. + return True + else: + # Got some other operator.
+ return False + + else: + # Expecting opening parenthesis or opening bracket + if operator in ('>', ')', ']'): + nesting_stack.append(operator) + elif operator in ('(', '['): + nesting_stack.pop() + + else: + # Scan the previous line + linenum -= 1 + if linenum < 0: + break + line = clean_lines.elided[linenum] + + # Exhausted all earlier lines and still no matching angle bracket. + return False + + +def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): + """Checks for the correctness of various spacing issues in the code. + + Things we check for: spaces around operators, spaces after + if/for/while/switch, no spaces around parens in function calls, two + spaces between code and comment, don't start a block with a blank + line, don't end a function with a blank line, don't add a blank line + after public/protected/private, don't have too many blank lines in a row. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + + # Don't use "elided" lines here, otherwise we can't check commented lines. + # Don't want to use "raw" either, because we don't want to check inside C++11 + # raw strings, + raw = clean_lines.lines_without_raw_strings + line = raw[linenum] + + # Before nixing comments, check if the line is blank for no good + # reason. This includes the first line after a block is opened, and + # blank lines at the end of a function (ie, right before a line like '}' + # + # Skip all the blank line checks if we are immediately inside a + # namespace body. In other words, don't issue blank line warnings + # for this block: + # namespace { + # + # } + # + # A warning about missing end of namespace comments will be issued instead. + if IsBlankLine(line) and not nesting_state.InNamespaceBody(): + elided = clean_lines.elided + prev_line = elided[linenum - 1] + prevbrace = prev_line.rfind('{') + # TODO(unknown): Don't complain if line before blank line, and line after, + # both start with alnums and are indented the same amount. + # This ignores whitespace at the start of a namespace block + # because those are not usually indented. + if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: + # OK, we have a blank line at the start of a code block. Before we + # complain, we check if it is an exception to the rule: The previous + # non-empty line has the parameters of a function header that are indented + # 4 spaces (because they did not fit in a 80 column line when placed on + # the same line as the function name). We also check for the case where + # the previous line is indented 6 spaces, which may happen when the + # initializers of a constructor do not fit into a 80 column line. + exception = False + if Match(r' {6}\w', prev_line): # Initializer list? + # We are looking for the opening column of initializer list, which + # should be indented 4 spaces to cause 6 space indentation afterwards. + search_position = linenum-2 + while (search_position >= 0 + and Match(r' {6}\w', elided[search_position])): + search_position -= 1 + exception = (search_position >= 0 + and elided[search_position][:5] == ' :') + else: + # Search for the function arguments or an initializer list. 
We use a + # simple heuristic here: If the line is indented 4 spaces; and we have a + # closing paren, without the opening paren, followed by an opening brace + # or colon (for initializer lists) we assume that it is the last line of + # a function header. If we have a colon indented 4 spaces, it is an + # initializer list. + exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', + prev_line) + or Match(r' {4}:', prev_line)) + + if not exception: + error(filename, linenum, 'whitespace/blank_line', 2, + 'Redundant blank line at the start of a code block ' + 'should be deleted.') + # Ignore blank lines at the end of a block in a long if-else + # chain, like this: + # if (condition1) { + # // Something followed by a blank line + # + # } else if (condition2) { + # // Something else + # } + if linenum + 1 < clean_lines.NumLines(): + next_line = raw[linenum + 1] + if (next_line + and Match(r'\s*}', next_line) + and next_line.find('} else ') == -1): + error(filename, linenum, 'whitespace/blank_line', 3, + 'Redundant blank line at the end of a code block ' + 'should be deleted.') + + matched = Match(r'\s*(public|protected|private):', prev_line) + if matched: + error(filename, linenum, 'whitespace/blank_line', 3, + 'Do not leave a blank line after "%s:"' % matched.group(1)) + + # Next, we complain if there's a comment too near the text + commentpos = line.find('//') + if commentpos != -1: + # Check if the // may be in quotes. If so, ignore it + # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison + if (line.count('"', 0, commentpos) - + line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes + # Allow one space for new scopes, two spaces otherwise: + if (not Match(r'^\s*{ //', line) and + ((commentpos >= 1 and + line[commentpos-1] not in string.whitespace) or + (commentpos >= 2 and + line[commentpos-2] not in string.whitespace))): + error(filename, linenum, 'whitespace/comments', 2, + 'At least two spaces is best between code and comments') + # There should always be a space between the // and the comment + commentend = commentpos + 2 + if commentend < len(line) and not line[commentend] == ' ': + # but some lines are exceptions -- e.g. if they're big + # comment delimiters like: + # //---------------------------------------------------------- + # or are an empty C++ style Doxygen comment, like: + # /// + # or C++ style Doxygen comments placed after the variable: + # ///< Header comment + # //!< Header comment + # or they begin with multiple slashes followed by a space: + # //////// Header comment + match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or + Search(r'^/$', line[commentend:]) or + Search(r'^!< ', line[commentend:]) or + Search(r'^/< ', line[commentend:]) or + Search(r'^/+ ', line[commentend:])) + if not match: + error(filename, linenum, 'whitespace/comments', 4, + 'Should have a space between // and comment') + CheckComment(line[commentpos:], filename, linenum, error) + + line = clean_lines.elided[linenum] # get rid of comments and strings + + # Don't try to do spacing checks for operator methods + line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) + + # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". + # Otherwise not. Note we only check for non-spaces on *both* sides; + # sometimes people put non-spaces on one side when aligning ='s among + # many lines (not that this is behavior that I approve of...) 
+ if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): + error(filename, linenum, 'whitespace/operators', 4, + 'Missing spaces around =') + + # It's ok not to have spaces around binary operators like + - * /, but if + # there's too little whitespace, we get concerned. It's hard to tell, + # though, so we punt on this one for now. TODO. + + # You should always have whitespace around binary operators. + # + # Check <= and >= first to avoid false positives with < and >, then + # check non-include lines for spacing around < and >. + match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line) + if match: + error(filename, linenum, 'whitespace/operators', 3, + 'Missing spaces around %s' % match.group(1)) + # We allow no-spaces around << when used like this: 10<<20, but + # not otherwise (particularly, not when used as streams) + # Also ignore using ns::operator<<; + match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line) + if (match and + not (match.group(1).isdigit() and match.group(2).isdigit()) and + not (match.group(1) == 'operator' and match.group(2) == ';')): + error(filename, linenum, 'whitespace/operators', 3, + 'Missing spaces around <<') + elif not Match(r'#.*include', line): + # Avoid false positives on -> + reduced_line = line.replace('->', '') + + # Look for < that is not surrounded by spaces. This is only + # triggered if both sides are missing spaces, even though + # technically should should flag if at least one side is missing a + # space. This is done to avoid some false positives with shifts. + match = Search(r'[^\s<]<([^\s=<].*)', reduced_line) + if (match and + not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))): + error(filename, linenum, 'whitespace/operators', 3, + 'Missing spaces around <') + + # Look for > that is not surrounded by spaces. Similar to the + # above, we only trigger if both sides are missing spaces to avoid + # false positives with shifts. + match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line) + if (match and + not FindPreviousMatchingAngleBracket(clean_lines, linenum, + match.group(1))): + error(filename, linenum, 'whitespace/operators', 3, + 'Missing spaces around >') + + # We allow no-spaces around >> for almost anything. This is because + # C++11 allows ">>" to close nested templates, which accounts for + # most cases when ">>" is not followed by a space. + # + # We still warn on ">>" followed by alpha character, because that is + # likely due to ">>" being used for right shifts, e.g.: + # value >> alpha + # + # When ">>" is used to close templates, the alphanumeric letter that + # follows would be part of an identifier, and there should still be + # a space separating the template type and the identifier. 
+ # type> alpha + match = Search(r'>>[a-zA-Z_]', line) + if match: + error(filename, linenum, 'whitespace/operators', 3, + 'Missing spaces around >>') + + # There shouldn't be space around unary operators + match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) + if match: + error(filename, linenum, 'whitespace/operators', 4, + 'Extra space for operator %s' % match.group(1)) + + # A pet peeve of mine: no spaces after an if, while, switch, or for + match = Search(r' (if\(|for\(|while\(|switch\()', line) + if match: + error(filename, linenum, 'whitespace/parens', 5, + 'Missing space before ( in %s' % match.group(1)) + + # For if/for/while/switch, the left and right parens should be + # consistent about how many spaces are inside the parens, and + # there should either be zero or one spaces inside the parens. + # We don't want: "if ( foo)" or "if ( foo )". + # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. + match = Search(r'\b(if|for|while|switch)\s*' + r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', + line) + if match: + if len(match.group(2)) != len(match.group(4)): + if not (match.group(3) == ';' and + len(match.group(2)) == 1 + len(match.group(4)) or + not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): + error(filename, linenum, 'whitespace/parens', 5, + 'Mismatching spaces inside () in %s' % match.group(1)) + if len(match.group(2)) not in [0, 1]: + error(filename, linenum, 'whitespace/parens', 5, + 'Should have zero or one spaces inside ( and ) in %s' % + match.group(1)) + + # You should always have a space after a comma (either as fn arg or operator) + # + # This does not apply when the non-space character following the + # comma is another comma, since the only time when that happens is + # for empty macro arguments. + # + # We run this check in two passes: first pass on elided lines to + # verify that lines contain missing whitespaces, second pass on raw + # lines to confirm that those missing whitespaces are not due to + # elided comments. + if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]): + error(filename, linenum, 'whitespace/comma', 3, + 'Missing space after ,') + + # You should always have a space after a semicolon + # except for few corner cases + # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more + # space after ; + if Search(r';[^\s};\\)/]', line): + error(filename, linenum, 'whitespace/semicolon', 3, + 'Missing space after ;') + + # Next we will look for issues with function calls. + CheckSpacingForFunctionCall(filename, line, linenum, error) + + # Except after an opening paren, or after another opening brace (in case of + # an initializer list, for instance), you should have spaces before your + # braces. And since you should never have braces at the beginning of a line, + # this is an easy test. + match = Match(r'^(.*[^ ({]){', line) + if match: + # Try a bit harder to check for brace initialization. This + # happens in one of the following forms: + # Constructor() : initializer_list_{} { ... } + # Constructor{}.MemberFunction() + # Type variable{}; + # FunctionCall(type{}, ...); + # LastArgument(..., type{}); + # LOG(INFO) << type{} << " ..."; + # map_of_type[{...}] = ...; + # + # We check for the character following the closing brace, and + # silence the warning if it's one of those listed above, i.e. + # "{.;,)<]". + # + # To account for nested initializer list, we allow any number of + # closing braces up to "{;,)<". 
We can't simply silence the + # warning on first sight of closing brace, because that would + # cause false negatives for things that are not initializer lists. + # Silence this: But not this: + # Outer{ if (...) { + # Inner{...} if (...){ // Missing space before { + # }; } + # + # There is a false negative with this approach if people inserted + # spurious semicolons, e.g. "if (cond){};", but we will catch the + # spurious semicolon with a separate check. + (endline, endlinenum, endpos) = CloseExpression( + clean_lines, linenum, len(match.group(1))) + trailing_text = '' + if endpos > -1: + trailing_text = endline[endpos:] + for offset in xrange(endlinenum + 1, + min(endlinenum + 3, clean_lines.NumLines() - 1)): + trailing_text += clean_lines.elided[offset] + if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text): + error(filename, linenum, 'whitespace/braces', 5, + 'Missing space before {') + + # Make sure '} else {' has spaces. + if Search(r'}else', line): + error(filename, linenum, 'whitespace/braces', 5, + 'Missing space before else') + + # You shouldn't have spaces before your brackets, except maybe after + # 'delete []' or 'new char * []'. + if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line): + error(filename, linenum, 'whitespace/braces', 5, + 'Extra space before [') + + # You shouldn't have a space before a semicolon at the end of the line. + # There's a special case for "for" since the style guide allows space before + # the semicolon there. + if Search(r':\s*;\s*$', line): + error(filename, linenum, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement. Use {} instead.') + elif Search(r'^\s*;\s*$', line): + error(filename, linenum, 'whitespace/semicolon', 5, + 'Line contains only semicolon. If this should be an empty statement, ' + 'use {} instead.') + elif (Search(r'\s+;\s*$', line) and + not Search(r'\bfor\b', line)): + error(filename, linenum, 'whitespace/semicolon', 5, + 'Extra space before last semicolon. If this should be an empty ' + 'statement, use {} instead.') + + # In range-based for, we wanted spaces before and after the colon, but + # not around "::" tokens that might appear. + if (Search('for *\(.*[^:]:[^: ]', line) or + Search('for *\(.*[^: ]:[^:]', line)): + error(filename, linenum, 'whitespace/forcolon', 2, + 'Missing space around colon in range-based for loop') + + +def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): + """Checks for additional blank line issues related to sections. + + Currently the only thing checked here is blank line before protected/private. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + class_info: A _ClassInfo objects. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + # Skip checks if the class is small, where small means 25 lines or less. + # 25 lines seems like a good cutoff since that's the usual height of + # terminals, and any class that can't fit in one screen can't really + # be considered "small". + # + # Also skip checks if we are on the first line. This accounts for + # classes that look like + # class Foo { public: ... }; + # + # If we didn't find the end of the class, last_line would be zero, + # and the check will be skipped by the first condition. 
+ if (class_info.last_line - class_info.starting_linenum <= 24 or + linenum <= class_info.starting_linenum): + return + + matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) + if matched: + # Issue warning if the line before public/protected/private was + # not a blank line, but don't do this if the previous line contains + # "class" or "struct". This can happen two ways: + # - We are at the beginning of the class. + # - We are forward-declaring an inner class that is semantically + # private, but needed to be public for implementation reasons. + # Also ignores cases where the previous line ends with a backslash as can be + # common when defining classes in C macros. + prev_line = clean_lines.lines[linenum - 1] + if (not IsBlankLine(prev_line) and + not Search(r'\b(class|struct)\b', prev_line) and + not Search(r'\\$', prev_line)): + # Try a bit harder to find the beginning of the class. This is to + # account for multi-line base-specifier lists, e.g.: + # class Derived + # : public Base { + end_class_head = class_info.starting_linenum + for i in range(class_info.starting_linenum, linenum): + if Search(r'\{\s*$', clean_lines.lines[i]): + end_class_head = i + break + if end_class_head < linenum - 1: + error(filename, linenum, 'whitespace/blank_line', 3, + '"%s:" should be preceded by a blank line' % matched.group(1)) + + +def GetPreviousNonBlankLine(clean_lines, linenum): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + linenum: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. + """ + + prevlinenum = linenum - 1 + while prevlinenum >= 0: + prevline = clean_lines.elided[prevlinenum] + if not IsBlankLine(prevline): # if not a blank line... + return (prevline, prevlinenum) + prevlinenum -= 1 + return ('', -1) + + +def CheckBraces(filename, clean_lines, linenum, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[linenum] # get rid of comments and strings + + if Match(r'\s*{\s*$', line): + # We allow an open brace to start a line in the case where someone is using + # braces in a block to explicitly create a new scope, which is commonly used + # to control the lifetime of stack-allocated variables. Braces are also + # used for brace initializers inside function calls. We don't detect this + # perfectly: we just don't complain if the last non-whitespace character on + # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the + # previous line starts a preprocessor block. + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if (not Search(r'[,;:}{(]\s*$', prevline) and + not Match(r'\s*#', prevline)): + error(filename, linenum, 'whitespace/braces', 4, + '{ should almost always be at the end of the previous line') + + # An else clause should be on the same line as the preceding closing brace. 
+ if Match(r'\s*else\s*', line): + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if Match(r'\s*}\s*$', prevline): + error(filename, linenum, 'whitespace/newline', 4, + 'An else should appear on the same line as the preceding }') + + # If braces come on one side of an else, they should be on both. + # However, we have to worry about "else if" that spans multiple lines! + if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): + if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if + # find the ( after the if + pos = line.find('else if') + pos = line.find('(', pos) + if pos > 0: + (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) + if endline[endpos:].find('{') == -1: # must be brace after if + error(filename, linenum, 'readability/braces', 5, + 'If an else has a brace on one side, it should have it on both') + else: # common case: else not followed by a multi-line if + error(filename, linenum, 'readability/braces', 5, + 'If an else has a brace on one side, it should have it on both') + + # Likewise, an else should never have the else clause on the same line + if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): + error(filename, linenum, 'whitespace/newline', 4, + 'Else clause should never be on same line as else (use 2 lines)') + + # In the same way, a do/while should never be on one line + if Match(r'\s*do [^\s{]', line): + error(filename, linenum, 'whitespace/newline', 4, + 'do/while clauses should not be on a single line') + + # Block bodies should not be followed by a semicolon. Due to C++11 + # brace initialization, there are more places where semicolons are + # required than not, so we use a whitelist approach to check these + # rather than a blacklist. These are the places where "};" should + # be replaced by just "}": + # 1. Some flavor of block following closing parenthesis: + # for (;;) {}; + # while (...) {}; + # switch (...) {}; + # Function(...) {}; + # if (...) {}; + # if (...) else if (...) {}; + # + # 2. else block: + # if (...) else {}; + # + # 3. const member function: + # Function(...) const {}; + # + # 4. Block following some statement: + # x = 42; + # {}; + # + # 5. Block at the beginning of a function: + # Function(...) { + # {}; + # } + # + # Note that naively checking for the preceding "{" will also match + # braces inside multi-dimensional arrays, but this is fine since + # that expression will not contain semicolons. + # + # 6. Block following another block: + # while (true) {} + # {}; + # + # 7. End of namespaces: + # namespace {}; + # + # These semicolons seems far more common than other kinds of + # redundant semicolons, possibly due to people converting classes + # to namespaces. For now we do not warn for this case. + # + # Try matching case 1 first. + match = Match(r'^(.*\)\s*)\{', line) + if match: + # Matched closing parenthesis (case 1). Check the token before the + # matching opening parenthesis, and don't warn if it looks like a + # macro. 
This avoids these false positives: + # - macro that defines a base class + # - multi-line macro that defines a base class + # - macro that defines the whole class-head + # + # But we still issue warnings for macros that we know are safe to + # warn, specifically: + # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P + # - TYPED_TEST + # - INTERFACE_DEF + # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: + # + # We implement a whitelist of safe macros instead of a blacklist of + # unsafe macros, even though the latter appears less frequently in + # google code and would have been easier to implement. This is because + # the downside for getting the whitelist wrong means some extra + # semicolons, while the downside for getting the blacklist wrong + # would result in compile errors. + # + # In addition to macros, we also don't want to warn on compound + # literals. + closing_brace_pos = match.group(1).rfind(')') + opening_parenthesis = ReverseCloseExpression( + clean_lines, linenum, closing_brace_pos) + if opening_parenthesis[2] > -1: + line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] + macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) + if ((macro and + macro.group(1) not in ( + 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', + 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', + 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or + Search(r'\s+=\s*$', line_prefix)): + match = None + + else: + # Try matching cases 2-3. + match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) + if not match: + # Try matching cases 4-6. These are always matched on separate lines. + # + # Note that we can't simply concatenate the previous line to the + # current line and do a single match, otherwise we may output + # duplicate warnings for the blank line case: + # if (cond) { + # // blank line + # } + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if prevline and Search(r'[;{}]\s*$', prevline): + match = Match(r'^(\s*)\{', line) + + # Check matching closing brace + if match: + (endline, endlinenum, endpos) = CloseExpression( + clean_lines, linenum, len(match.group(1))) + if endpos > -1 and Match(r'^\s*;', endline[endpos:]): + # Current {} pair is eligible for semicolon check, and we have found + # the redundant semicolon, output warning here. + # + # Note: because we are scanning forward for opening braces, and + # outputting warnings for the matching closing brace, if there are + # nested blocks with trailing semicolons, we will get the error + # messages in reversed order. + error(filename, endlinenum, 'readability/braces', 4, + "You don't need a ; after a }") + + +def CheckEmptyBlockBody(filename, clean_lines, linenum, error): + """Look for empty loop/conditional body with only a single semicolon. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Search for loop keywords at the beginning of the line. Because only + # whitespaces are allowed before the keywords, this will also ignore most + # do-while-loops, since those lines should start with closing brace. + # + # We also check "if" blocks here, since an empty conditional block + # is likely an error. 
+ line = clean_lines.elided[linenum] + matched = Match(r'\s*(for|while|if)\s*\(', line) + if matched: + # Find the end of the conditional expression + (end_line, end_linenum, end_pos) = CloseExpression( + clean_lines, linenum, line.find('(')) + + # Output warning if what follows the condition expression is a semicolon. + # No warning for all other cases, including whitespace or newline, since we + # have a separate check for semicolons preceded by whitespace. + if end_pos >= 0 and Match(r';', end_line[end_pos:]): + if matched.group(1) == 'if': + error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, + 'Empty conditional bodies should use {}') + else: + error(filename, end_linenum, 'whitespace/empty_loop_body', 5, + 'Empty loop bodies should use {} or continue') + + +def CheckCheck(filename, clean_lines, linenum, error): + """Checks the use of CHECK and EXPECT macros. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Decide the set of replacement macros that should be suggested + lines = clean_lines.elided + check_macro = None + start_pos = -1 + for macro in _CHECK_MACROS: + i = lines[linenum].find(macro) + if i >= 0: + check_macro = macro + + # Find opening parenthesis. Do a regular expression match here + # to make sure that we are matching the expected CHECK macro, as + # opposed to some other macro that happens to contain the CHECK + # substring. + matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum]) + if not matched: + continue + start_pos = len(matched.group(1)) + break + if not check_macro or start_pos < 0: + # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' + return + + # Find end of the boolean expression by matching parentheses + (last_line, end_line, end_pos) = CloseExpression( + clean_lines, linenum, start_pos) + if end_pos < 0: + return + if linenum == end_line: + expression = lines[linenum][start_pos + 1:end_pos - 1] + else: + expression = lines[linenum][start_pos + 1:] + for i in xrange(linenum + 1, end_line): + expression += lines[i] + expression += last_line[0:end_pos - 1] + + # Parse expression so that we can take parentheses into account. + # This avoids false positives for inputs like "CHECK((a < 4) == b)", + # which is not replaceable by CHECK_LE. + lhs = '' + rhs = '' + operator = None + while expression: + matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' + r'==|!=|>=|>|<=|<|\()(.*)$', expression) + if matched: + token = matched.group(1) + if token == '(': + # Parenthesized operand + expression = matched.group(2) + (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')') + if end < 0: + return # Unmatched parenthesis + lhs += '(' + expression[0:end] + expression = expression[end:] + elif token in ('&&', '||'): + # Logical and/or operators. This means the expression + # contains more than one term, for example: + # CHECK(42 < a && a < b); + # + # These are not replaceable with CHECK_LE, so bail out early. + return + elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): + # Non-relational operator + lhs += token + expression = matched.group(2) + else: + # Relational operator + operator = token + rhs = matched.group(2) + break + else: + # Unparenthesized operand. Instead of appending to lhs one character + # at a time, we do another regular expression match to consume several + # characters at once if possible. 
Trivial benchmark shows that this + # is more efficient when the operands are longer than a single + # character, which is generally the case. + matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) + if not matched: + matched = Match(r'^(\s*\S)(.*)$', expression) + if not matched: + break + lhs += matched.group(1) + expression = matched.group(2) + + # Only apply checks if we got all parts of the boolean expression + if not (lhs and operator and rhs): + return + + # Check that rhs do not contain logical operators. We already know + # that lhs is fine since the loop above parses out && and ||. + if rhs.find('&&') > -1 or rhs.find('||') > -1: + return + + # At least one of the operands must be a constant literal. This is + # to avoid suggesting replacements for unprintable things like + # CHECK(variable != iterator) + # + # The following pattern matches decimal, hex integers, strings, and + # characters (in that order). + lhs = lhs.strip() + rhs = rhs.strip() + match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' + if Match(match_constant, lhs) or Match(match_constant, rhs): + # Note: since we know both lhs and rhs, we can provide a more + # descriptive error message like: + # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) + # Instead of: + # Consider using CHECK_EQ instead of CHECK(a == b) + # + # We are still keeping the less descriptive message because if lhs + # or rhs gets long, the error message might become unreadable. + error(filename, linenum, 'readability/check', 2, + 'Consider using %s instead of %s(a %s b)' % ( + _CHECK_REPLACEMENT[check_macro][operator], + check_macro, operator)) + + +def CheckAltTokens(filename, clean_lines, linenum, error): + """Check alternative keywords being used in boolean expressions. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Avoid preprocessor lines + if Match(r'^\s*#', line): + return + + # Last ditch effort to avoid multi-line comments. This will not help + # if the comment started before the current line or ended after the + # current line, but it catches most of the false positives. At least, + # it provides a way to workaround this warning for people who use + # multi-line comments in preprocessor macros. + # + # TODO(unknown): remove this once cpplint has better support for + # multi-line comments. + if line.find('/*') >= 0 or line.find('*/') >= 0: + return + + for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): + error(filename, linenum, 'readability/alt_tokens', 2, + 'Use operator %s instead of %s' % ( + _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) + + +def GetLineWidth(line): + """Determines the width of the line in column positions. + + Args: + line: A string, which may be a Unicode string. + + Returns: + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ + if isinstance(line, unicode): + width = 0 + for uc in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(uc) in ('W', 'F'): + width += 2 + elif not unicodedata.combining(uc): + width += 1 + return width + else: + return len(line) + + +def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, + error): + """Checks rules from the 'C++ style rules' section of cppguide.html. 
+ + Most of these rules are hard to test (naming, comment style), but we + do what we can. In particular we check for 2-space indents, line lengths, + tab usage, spaces inside code, etc. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + + # Don't use "elided" lines here, otherwise we can't check commented lines. + # Don't want to use "raw" either, because we don't want to check inside C++11 + # raw strings, + raw_lines = clean_lines.lines_without_raw_strings + line = raw_lines[linenum] + + if line.find('\t') != -1: + error(filename, linenum, 'whitespace/tab', 1, + 'Tab found; better to use spaces') + + # One or three blank spaces at the beginning of the line is weird; it's + # hard to reconcile that with 2-space indents. + # NOTE: here are the conditions rob pike used for his tests. Mine aren't + # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces + # if(RLENGTH > 20) complain = 0; + # if(match($0, " +(error|private|public|protected):")) complain = 0; + # if(match(prev, "&& *$")) complain = 0; + # if(match(prev, "\\|\\| *$")) complain = 0; + # if(match(prev, "[\",=><] *$")) complain = 0; + # if(match($0, " <<")) complain = 0; + # if(match(prev, " +for \\(")) complain = 0; + # if(prevodd && match(prevprev, " +for \\(")) complain = 0; + initial_spaces = 0 + cleansed_line = clean_lines.elided[linenum] + while initial_spaces < len(line) and line[initial_spaces] == ' ': + initial_spaces += 1 + if line and line[-1].isspace(): + error(filename, linenum, 'whitespace/end_of_line', 4, + 'Line ends in whitespace. Consider deleting these extra spaces.') + # There are certain situations we allow one space, notably for section labels + elif ((initial_spaces == 1 or initial_spaces == 3) and + not Match(r'\s*\w+\s*:\s*$', cleansed_line)): + error(filename, linenum, 'whitespace/indent', 3, + 'Weird number of spaces at line-start. ' + 'Are you using a 2-space indent?') + + # Check if the line is a header guard. + is_header_guard = False + if file_extension == 'h': + cppvar = GetHeaderGuardCPPVariable(filename) + if (line.startswith('#ifndef %s' % cppvar) or + line.startswith('#define %s' % cppvar) or + line.startswith('#endif // %s' % cppvar)): + is_header_guard = True + # #include lines and header guards can be long, since there's no clean way to + # split them. + # + # URLs can be long too. It's possible to split these, but it makes them + # harder to cut&paste. + # + # The "$Id:...$" comment may also get very long without it being the + # developers fault. + if (not line.startswith('#include') and not is_header_guard and + not Match(r'^\s*//.*http(s?)://\S*$', line) and + not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): + line_width = GetLineWidth(line) + extended_length = int((_line_length * 1.25)) + if line_width > extended_length: + error(filename, linenum, 'whitespace/line_length', 4, + 'Lines should very rarely be longer than %i characters' % + extended_length) + elif line_width > _line_length: + error(filename, linenum, 'whitespace/line_length', 2, + 'Lines should be <= %i characters long' % _line_length) + + if (cleansed_line.count(';') > 1 and + # for loops are allowed two ;'s (and may run over two lines). 
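+ # e.g. "x = 1; y = 2;" on a single line is typically flagged below,
+ # while "for (i = 0; i < n; ++i) {" is not.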
+ cleansed_line.find('for') == -1 and + (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or + GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and + # It's ok to have many commands in a switch case that fits in 1 line + not ((cleansed_line.find('case ') != -1 or + cleansed_line.find('default:') != -1) and + cleansed_line.find('break;') != -1)): + error(filename, linenum, 'whitespace/newline', 0, + 'More than one command on the same line') + + # Some more style checks + CheckBraces(filename, clean_lines, linenum, error) + CheckEmptyBlockBody(filename, clean_lines, linenum, error) + CheckAccess(filename, clean_lines, linenum, nesting_state, error) + CheckSpacing(filename, clean_lines, linenum, nesting_state, error) + CheckCheck(filename, clean_lines, linenum, error) + CheckAltTokens(filename, clean_lines, linenum, error) + classinfo = nesting_state.InnermostClass() + if classinfo: + CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) + + +_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') +_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') +# Matches the first component of a filename delimited by -s and _s. That is: +# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' +_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') + + +def _DropCommonSuffixes(filename): + """Drops common suffixes like _test.cc or -inl.h from filename. + + For example: + >>> _DropCommonSuffixes('foo/foo-inl.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/bar/foo.cc') + 'foo/bar/foo' + >>> _DropCommonSuffixes('foo/foo_internal.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') + 'foo/foo_unusualinternal' + + Args: + filename: The input filename. + + Returns: + The filename with the common suffix removed. + """ + for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', + 'inl.h', 'impl.h', 'internal.h'): + if (filename.endswith(suffix) and len(filename) > len(suffix) and + filename[-len(suffix) - 1] in ('-', '_')): + return filename[:-len(suffix) - 1] + return os.path.splitext(filename)[0] + + +def _IsTestFilename(filename): + """Determines if the given filename has a suffix that identifies it as a test. + + Args: + filename: The input filename. + + Returns: + True if 'filename' looks like a test, False otherwise. + """ + if (filename.endswith('_test.cc') or + filename.endswith('_unittest.cc') or + filename.endswith('_regtest.cc')): + return True + else: + return False + + +def _ClassifyInclude(fileinfo, include, is_system): + """Figures out what kind of header 'include' is. + + Args: + fileinfo: The current file cpplint is running over. A FileInfo instance. + include: The path to a #included file. + is_system: True if the #include used <> rather than "". + + Returns: + One of the _XXX_HEADER constants. + + For example: + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) + _C_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) + _CPP_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) + _LIKELY_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), + ... 
'bar/foo_other_ext.h', False) + _POSSIBLE_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) + _OTHER_HEADER + """ + # This is a list of all standard c++ header files, except + # those already checked for above. + is_cpp_h = include in _CPP_HEADERS + + if is_system: + if is_cpp_h: + return _CPP_SYS_HEADER + else: + return _C_SYS_HEADER + + # If the target file and the include we're checking share a + # basename when we drop common extensions, and the include + # lives in . , then it's likely to be owned by the target file. + target_dir, target_base = ( + os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) + include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) + if target_base == include_base and ( + include_dir == target_dir or + include_dir == os.path.normpath(target_dir + '/../public')): + return _LIKELY_MY_HEADER + + # If the target and include share some initial basename + # component, it's possible the target is implementing the + # include, so it's allowed to be first, but we'll never + # complain if it's not there. + target_first_component = _RE_FIRST_COMPONENT.match(target_base) + include_first_component = _RE_FIRST_COMPONENT.match(include_base) + if (target_first_component and include_first_component and + target_first_component.group(0) == + include_first_component.group(0)): + return _POSSIBLE_MY_HEADER + + return _OTHER_HEADER + + + +def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): + """Check rules that are applicable to #include lines. + + Strings on #include lines are NOT removed from elided line, to make + certain tasks easier. However, to prevent false positives, checks + applicable to #include lines in CheckLanguage must be put here. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + include_state: An _IncludeState instance in which the headers are inserted. + error: The function to call with any errors found. + """ + fileinfo = FileInfo(filename) + + line = clean_lines.lines[linenum] + + # "include" should use the new style "foo/bar.h" instead of just "bar.h" + if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): + error(filename, linenum, 'build/include_dir', 4, + 'Include the directory when naming .h files') + + # we shouldn't include a file more than once. actually, there are a + # handful of instances where doing so is okay, but in general it's + # not. + match = _RE_PATTERN_INCLUDE.search(line) + if match: + include = match.group(2) + is_system = (match.group(1) == '<') + if include in include_state: + error(filename, linenum, 'build/include', 4, + '"%s" already included at %s:%s' % + (include, filename, include_state[include])) + else: + include_state[include] = linenum + + # We want to ensure that headers appear in the right order: + # 1) for foo.cc, foo.h (preferred location) + # 2) c system files + # 3) cpp system files + # 4) for foo.cc, foo.h (deprecated location) + # 5) other google headers + # + # We classify each include statement as one of those 5 types + # using a number of techniques. The include_state object keeps + # track of the highest type seen, and complains if we see a + # lower type after that. + error_message = include_state.CheckNextIncludeOrder( + _ClassifyInclude(fileinfo, include, is_system)) + if error_message: + error(filename, linenum, 'build/include_order', 4, + '%s. Should be: %s.h, c system, c++ system, other.' 
% + (error_message, fileinfo.BaseName())) + canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) + if not include_state.IsInAlphabeticalOrder( + clean_lines, linenum, canonical_include): + error(filename, linenum, 'build/include_alpha', 4, + 'Include "%s" not in alphabetical order' % include) + include_state.SetLastHeader(canonical_include) + + # Look for any of the stream classes that are part of standard C++. + match = _RE_PATTERN_INCLUDE.match(line) + if match: + include = match.group(2) + if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): + # Many unit tests use cout, so we exempt them. + if not _IsTestFilename(filename): + error(filename, linenum, 'readability/streams', 3, + 'Streams are highly discouraged.') + + +def _GetTextInside(text, start_pattern): + r"""Retrieves all the text between matching open and close parentheses. + + Given a string of lines and a regular expression string, retrieve all the text + following the expression and between opening punctuation symbols like + (, [, or {, and the matching close-punctuation symbol. This properly nested + occurrences of the punctuations, so for the text like + printf(a(), b(c())); + a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. + start_pattern must match string having an open punctuation symbol at the end. + + Args: + text: The lines to extract text. Its comments and strings must be elided. + It can be single line and can span multiple lines. + start_pattern: The regexp string indicating where to start extracting + the text. + Returns: + The extracted text. + None if either the opening string or ending punctuation could not be found. + """ + # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably + # rewritten to use _GetTextInside (and use inferior regexp matching today). + + # Give opening punctuations to get the matching close-punctuations. + matching_punctuation = {'(': ')', '{': '}', '[': ']'} + closing_punctuation = set(matching_punctuation.itervalues()) + + # Find the position to start extracting text. + match = re.search(start_pattern, text, re.M) + if not match: # start_pattern not found in text. + return None + start_position = match.end(0) + + assert start_position > 0, ( + 'start_pattern must ends with an opening punctuation.') + assert text[start_position - 1] in matching_punctuation, ( + 'start_pattern must ends with an opening punctuation.') + # Stack of closing punctuations we expect to have in text after position. + punctuation_stack = [matching_punctuation[text[start_position - 1]]] + position = start_position + while punctuation_stack and position < len(text): + if text[position] == punctuation_stack[-1]: + punctuation_stack.pop() + elif text[position] in closing_punctuation: + # A closing punctuation without matching opening punctuations. + return None + elif text[position] in matching_punctuation: + punctuation_stack.append(matching_punctuation[text[position]]) + position += 1 + if punctuation_stack: + # Opening punctuations left without matching close-punctuations. + return None + # punctuations match. + return text[start_position:position - 1] + + +# Patterns for matching call-by-reference parameters. +# +# Supports nested templates up to 2 levels deep using this messy pattern: +# < (?: < (?: < [^<>]* +# > +# | [^<>] )* +# > +# | [^<>] )* +# > +_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* +_RE_PATTERN_TYPE = ( + r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' 
+ r'(?:\w|' + r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' + r'::)+') +# A call-by-reference parameter ends with '& identifier'. +_RE_PATTERN_REF_PARAM = re.compile( + r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' + r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') +# A call-by-const-reference parameter either ends with 'const& identifier' +# or looks like 'const type& identifier' when 'type' is atomic. +_RE_PATTERN_CONST_REF_PARAM = ( + r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') + + +def CheckLanguage(filename, clean_lines, linenum, file_extension, + include_state, nesting_state, error): + """Checks rules from the 'C++ language rules' section of cppguide.html. + + Some of these rules are hard to test (function overloading, using + uint32 inappropriately), but we do the best we can. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + include_state: An _IncludeState instance in which the headers are inserted. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # If the line is empty or consists of entirely a comment, no need to + # check it. + line = clean_lines.elided[linenum] + if not line: + return + + match = _RE_PATTERN_INCLUDE.search(line) + if match: + CheckIncludeLine(filename, clean_lines, linenum, include_state, error) + return + + # Reset include state across preprocessor directives. This is meant + # to silence warnings for conditional includes. + if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line): + include_state.ResetSection() + + # Make Windows paths like Unix. + fullname = os.path.abspath(filename).replace('\\', '/') + + # TODO(unknown): figure out if they're using default arguments in fn proto. + + # Check to see if they're using an conversion function cast. + # I just try to capture the most common basic types, though there are more. + # Parameterless conversion functions, such as bool(), are allowed as they are + # probably a member operator declaration or default constructor. + match = Search( + r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there + r'(int|float|double|bool|char|int32|uint32|int64|uint64)' + r'(\([^)].*)', line) + if match: + matched_new = match.group(1) + matched_type = match.group(2) + matched_funcptr = match.group(3) + + # gMock methods are defined using some variant of MOCK_METHODx(name, type) + # where type may be float(), int(string), etc. Without context they are + # virtually indistinguishable from int(x) casts. Likewise, gMock's + # MockCallback takes a template parameter of the form return_type(arg_type), + # which looks much like the cast we're trying to detect. + # + # std::function<> wrapper has a similar problem. + # + # Return types for function pointers also look like casts if they + # don't have an extra space. 
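+ # For example, "int x = int(3.5);" is typically flagged here with a
+ # suggestion to use static_cast<int>(3.5), while "new int(3)" and gMock
+ # declarations such as "MOCK_METHOD1(GetCount, int(string path));" are not.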
+ if (matched_new is None and # If new operator, then this isn't a cast + not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or + Search(r'\bMockCallback<.*>', line) or + Search(r'\bstd::function<.*>', line)) and + not (matched_funcptr and + Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', + matched_funcptr))): + # Try a bit harder to catch gmock lines: the only place where + # something looks like an old-style cast is where we declare the + # return type of the mocked method, and the only time when we + # are missing context is if MOCK_METHOD was split across + # multiple lines. The missing MOCK_METHOD is usually one or two + # lines back, so scan back one or two lines. + # + # It's not possible for gmock macros to appear in the first 2 + # lines, since the class head + section name takes up 2 lines. + if (linenum < 2 or + not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', + clean_lines.elided[linenum - 1]) or + Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', + clean_lines.elided[linenum - 2]))): + error(filename, linenum, 'readability/casting', 4, + 'Using deprecated casting style. ' + 'Use static_cast<%s>(...) instead' % + matched_type) + + CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'static_cast', + r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) + + # This doesn't catch all cases. Consider (const char * const)"hello". + # + # (char *) "foo" should always be a const_cast (reinterpret_cast won't + # compile). + if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): + pass + else: + # Check pointer casts for other than string constants + CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) + + # In addition, we look for people taking the address of a cast. This + # is dangerous -- casts can assign to temporaries, so the pointer doesn't + # point where you think. + match = Search( + r'(?:&\(([^)]+)\)[\w(])|' + r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) + if match and match.group(1) != '*': + error(filename, linenum, 'runtime/casting', 4, + ('Are you taking an address of a cast? ' + 'This is dangerous: could be a temp var. ' + 'Take the address before doing the cast, rather than after')) + + # Create an extended_line, which is the concatenation of the current and + # next lines, for more effective checking of code that may span more than one + # line. + if linenum + 1 < clean_lines.NumLines(): + extended_line = line + clean_lines.elided[linenum + 1] + else: + extended_line = line + + # Check for people declaring static/global STL strings at the top level. + # This is dangerous because the C++ language does not guarantee that + # globals with constructors are initialized before the first access. + match = Match( + r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', + line) + # Make sure it's not a function. + # Function template specialization looks like: "string foo(...". + # Class template definitions look like: "string Foo::Method(...". + # + # Also ignore things that look like operators. These are matched separately + # because operator names cross non-word boundaries. If we change the pattern + # above, we would decrease the accuracy of matching identifiers. 
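+ # For example, 'static string kPrefix = "foo/";' is typically flagged here;
+ # a plain C string such as 'static const char kPrefix[] = "foo/";' avoids
+ # the static-initialization-order problem, while function declarations like
+ # 'string Foo::Method(const string& arg) {' are not flagged.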
+ if (match and + not Search(r'\boperator\W', line) and + not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): + error(filename, linenum, 'runtime/string', 4, + 'For a static/global string constant, use a C style string instead: ' + '"%schar %s[]".' % + (match.group(1), match.group(2))) + + if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): + error(filename, linenum, 'runtime/init', 4, + 'You seem to be initializing a member variable with itself.') + + if file_extension == 'h': + # TODO(unknown): check that 1-arg constructors are explicit. + # How to tell it's a constructor? + # (handled in CheckForNonStandardConstructs for now) + # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS + # (level 1 error) + pass + + # Check if people are using the verboten C basic types. The only exception + # we regularly allow is "unsigned short port" for port. + if Search(r'\bshort port\b', line): + if not Search(r'\bunsigned short port\b', line): + error(filename, linenum, 'runtime/int', 4, + 'Use "unsigned short" for ports, not "short"') + else: + match = Search(r'\b(short|long(?! +double)|long long)\b', line) + if match: + error(filename, linenum, 'runtime/int', 4, + 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) + + # When snprintf is used, the second argument shouldn't be a literal. + match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) + if match and match.group(2) != '0': + # If 2nd arg is zero, snprintf is used to calculate size. + error(filename, linenum, 'runtime/printf', 3, + 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' + 'to snprintf.' % (match.group(1), match.group(2))) + + # Check if some verboten C functions are being used. + if Search(r'\bsprintf\b', line): + error(filename, linenum, 'runtime/printf', 5, + 'Never use sprintf. Use snprintf instead.') + match = Search(r'\b(strcpy|strcat)\b', line) + if match: + error(filename, linenum, 'runtime/printf', 4, + 'Almost always, snprintf is better than %s' % match.group(1)) + + # Check if some verboten operator overloading is going on + # TODO(unknown): catch out-of-line unary operator&: + # class X {}; + # int operator&(const X& x) { return 42; } // unary operator& + # The trick is it's hard to tell apart from binary operator&: + # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& + if Search(r'\boperator\s*&\s*\(\s*\)', line): + error(filename, linenum, 'runtime/operator', 4, + 'Unary operator& is dangerous. Do not use it.') + + # Check for suspicious usage of "if" like + # } if (a == b) { + if Search(r'\}\s*if\s*\(', line): + error(filename, linenum, 'readability/braces', 4, + 'Did you mean "else if"? If not, start a new line for "if".') + + # Check for potential format string bugs like printf(foo). + # We constrain the pattern not to pick things like DocidForPrintf(foo). + # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) + # TODO(sugawarayu): Catch the following case. Need to change the calling + # convention of the whole function to process multiple line to handle it. + # printf( + # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); + printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') + if printf_args: + match = Match(r'([\w.\->()]+)$', printf_args) + if match and match.group(1) != '__VA_ARGS__': + function_name = re.search(r'\b((?:string)?printf)\s*\(', + line, re.I).group(1) + error(filename, linenum, 'runtime/printf', 4, + 'Potential format string bug. Do %s("%%s", %s) instead.' 
+ % (function_name, match.group(1))) + + # Check for potential memset bugs like memset(buf, sizeof(buf), 0). + match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) + if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): + error(filename, linenum, 'runtime/memset', 4, + 'Did you mean "memset(%s, 0, %s)"?' + % (match.group(1), match.group(2))) + + if Search(r'\busing namespace\b', line): + error(filename, linenum, 'build/namespaces', 5, + 'Do not use namespace using-directives. ' + 'Use using-declarations instead.') + + # Detect variable-length arrays. + match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) + if (match and match.group(2) != 'return' and match.group(2) != 'delete' and + match.group(3).find(']') == -1): + # Split the size using space and arithmetic operators as delimiters. + # If any of the resulting tokens are not compile time constants then + # report the error. + tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) + is_const = True + skip_next = False + for tok in tokens: + if skip_next: + skip_next = False + continue + + if Search(r'sizeof\(.+\)', tok): continue + if Search(r'arraysize\(\w+\)', tok): continue + + tok = tok.lstrip('(') + tok = tok.rstrip(')') + if not tok: continue + if Match(r'\d+', tok): continue + if Match(r'0[xX][0-9a-fA-F]+', tok): continue + if Match(r'k[A-Z0-9]\w*', tok): continue + if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue + if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue + # A catch all for tricky sizeof cases, including 'sizeof expression', + # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' + # requires skipping the next token because we split on ' ' and '*'. + if tok.startswith('sizeof'): + skip_next = True + continue + is_const = False + break + if not is_const: + error(filename, linenum, 'runtime/arrays', 1, + 'Do not use variable-length arrays. Use an appropriately named ' + "('k' followed by CamelCase) compile-time constant for the size.") + + # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or + # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing + # in the class declaration. + match = Match( + (r'\s*' + r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' + r'\(.*\);$'), + line) + if match and linenum + 1 < clean_lines.NumLines(): + next_line = clean_lines.elided[linenum + 1] + # We allow some, but not all, declarations of variables to be present + # in the statement that defines the class. The [\w\*,\s]* fragment of + # the regular expression below allows users to declare instances of + # the class or pointers to instances, but not less common types such + # as function pointers or arrays. It's a tradeoff between allowing + # reasonable code and avoiding trying to parse more C++ using regexps. + if not Search(r'^\s*}[\w\*,\s]*;', next_line): + error(filename, linenum, 'readability/constructors', 3, + match.group(1) + ' should be the last thing in the class') + + # Check for use of unnamed namespaces in header files. Registration + # macros are typically OK, so we allow use of "namespace {" on lines + # that end with backslashes. + if (file_extension == 'h' + and Search(r'\bnamespace\s*{', line) + and line[-1] != '\\'): + error(filename, linenum, 'build/namespaces', 4, + 'Do not use unnamed namespaces in header files. 
See ' + 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' + ' for more information.') + +def CheckForNonConstReference(filename, clean_lines, linenum, + nesting_state, error): + """Check for non-const references. + + Separate from CheckLanguage since it scans backwards from current + line, instead of scanning forward. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # Do nothing if there is no '&' on current line. + line = clean_lines.elided[linenum] + if '&' not in line: + return + + # Long type names may be broken across multiple lines, usually in one + # of these forms: + # LongType + # ::LongTypeContinued &identifier + # LongType:: + # LongTypeContinued &identifier + # LongType< + # ...>::LongTypeContinued &identifier + # + # If we detected a type split across two lines, join the previous + # line to current line so that we can match const references + # accordingly. + # + # Note that this only scans back one line, since scanning back + # arbitrary number of lines would be expensive. If you have a type + # that spans more than 2 lines, please use a typedef. + if linenum > 1: + previous = None + if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): + # previous_line\n + ::current_line + previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', + clean_lines.elided[linenum - 1]) + elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): + # previous_line::\n + current_line + previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', + clean_lines.elided[linenum - 1]) + if previous: + line = previous.group(1) + line.lstrip() + else: + # Check for templated parameter that is split across multiple lines + endpos = line.rfind('>') + if endpos > -1: + (_, startline, startpos) = ReverseCloseExpression( + clean_lines, linenum, endpos) + if startpos > -1 and startline < linenum: + # Found the matching < on an earlier line, collect all + # pieces up to current line. + line = '' + for i in xrange(startline, linenum + 1): + line += clean_lines.elided[i].strip() + + # Check for non-const references in function parameters. A single '&' may + # found in the following places: + # inside expression: binary & for bitwise AND + # inside expression: unary & for taking the address of something + # inside declarators: reference parameter + # We will exclude the first two cases by checking that we are not inside a + # function body, including one that was just introduced by a trailing '{'. + # TODO(unknwon): Doesn't account for preprocessor directives. + # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. + check_params = False + if not nesting_state.stack: + check_params = True # top level + elif (isinstance(nesting_state.stack[-1], _ClassInfo) or + isinstance(nesting_state.stack[-1], _NamespaceInfo)): + check_params = True # within class or namespace + elif Match(r'.*{\s*$', line): + if (len(nesting_state.stack) == 1 or + isinstance(nesting_state.stack[-2], _ClassInfo) or + isinstance(nesting_state.stack[-2], _NamespaceInfo)): + check_params = True # just opened global/class/namespace block + # We allow non-const references in a few standard places, like functions + # called "swap()" or iostream operators like "<<" or ">>". Do not check + # those function parameters. 
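+ # For example, 'void Swap(Foo& a, Foo& b);' is typically left alone here,
+ # while 'void Update(Foo& foo);' at namespace or class scope is flagged
+ # with a suggestion to take 'const Foo&' or 'Foo*' instead.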
+ # + # We also accept & in static_assert, which looks like a function but + # it's actually a declaration expression. + whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' + r'operator\s*[<>][<>]|' + r'static_assert|COMPILE_ASSERT' + r')\s*\(') + if Search(whitelisted_functions, line): + check_params = False + elif not Search(r'\S+\([^)]*$', line): + # Don't see a whitelisted function on this line. Actually we + # didn't see any function name on this line, so this is likely a + # multi-line parameter list. Try a bit harder to catch this case. + for i in xrange(2): + if (linenum > i and + Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + check_params = False + break + + if check_params: + decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body + for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): + if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): + error(filename, linenum, 'runtime/references', 2, + 'Is this a non-const reference? ' + 'If so, make const or use a pointer: ' + + ReplaceAll(' *<', '<', parameter)) + + +def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, + error): + """Checks for a C-style cast by looking for the pattern. + + Args: + filename: The name of the current file. + linenum: The number of the line to check. + line: The line of code to check. + raw_line: The raw line of code to check, with comments. + cast_type: The string for the C++ cast to recommend. This is either + reinterpret_cast, static_cast, or const_cast, depending. + pattern: The regular expression used to find C-style casts. + error: The function to call with any errors found. + + Returns: + True if an error was emitted. + False otherwise. + """ + match = Search(pattern, line) + if not match: + return False + + # Exclude lines with sizeof, since sizeof looks like a cast. + sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1]) + if sizeof_match: + return False + + # operator++(int) and operator--(int) + if (line[0:match.start(1) - 1].endswith(' operator++') or + line[0:match.start(1) - 1].endswith(' operator--')): + return False + + # A single unnamed argument for a function tends to look like old + # style cast. If we see those, don't issue warnings for deprecated + # casts, instead issue warnings for unnamed arguments where + # appropriate. + # + # These are things that we want warnings for, since the style guide + # explicitly require all parameters to be named: + # Function(int); + # Function(int) { + # ConstMember(int) const; + # ConstMember(int) const { + # ExceptionMember(int) throw (...); + # ExceptionMember(int) throw (...) { + # PureVirtual(int) = 0; + # + # These are functions of some sort, where the compiler would be fine + # if they had named parameters, but people often omit those + # identifiers to reduce clutter: + # (FunctionPointer)(int); + # (FunctionPointer)(int) = value; + # Function((function_pointer_arg)(int)) + # ; + # <(FunctionPointerTemplateArgument)(int)>; + remainder = line[match.end(0):] + if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder): + # Looks like an unnamed parameter. + + # Don't warn on any kind of template arguments. + if Match(r'^\s*>', remainder): + return False + + # Don't warn on assignments to function pointers, but keep warnings for + # unnamed parameters to pure virtual functions. Note that this pattern + # will also pass on assignments of "0" to function pointers, but the + # preferred values for those would be "nullptr" or "NULL". 
+ matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) + if matched_zero and matched_zero.group(1) != '0': + return False + + # Don't warn on function pointer declarations. For this we need + # to check what came before the "(type)" string. + if Match(r'.*\)\s*$', line[0:match.start(0)]): + return False + + # Don't warn if the parameter is named with block comments, e.g.: + # Function(int /*unused_param*/); + if '/*' in raw_line: + return False + + # Passed all filters, issue warning here. + error(filename, linenum, 'readability/function', 3, + 'All parameters should be named in a function') + return True + + # At this point, all that should be left is actual casts. + error(filename, linenum, 'readability/casting', 4, + 'Using C-style cast. Use %s<%s>(...) instead' % + (cast_type, match.group(1))) + + return True + + +_HEADERS_CONTAINING_TEMPLATES = ( + ('', ('deque',)), + ('', ('unary_function', 'binary_function', + 'plus', 'minus', 'multiplies', 'divides', 'modulus', + 'negate', + 'equal_to', 'not_equal_to', 'greater', 'less', + 'greater_equal', 'less_equal', + 'logical_and', 'logical_or', 'logical_not', + 'unary_negate', 'not1', 'binary_negate', 'not2', + 'bind1st', 'bind2nd', + 'pointer_to_unary_function', + 'pointer_to_binary_function', + 'ptr_fun', + 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', + 'mem_fun_ref_t', + 'const_mem_fun_t', 'const_mem_fun1_t', + 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', + 'mem_fun_ref', + )), + ('', ('numeric_limits',)), + ('', ('list',)), + ('', ('map', 'multimap',)), + ('', ('allocator',)), + ('', ('queue', 'priority_queue',)), + ('', ('set', 'multiset',)), + ('', ('stack',)), + ('', ('char_traits', 'basic_string',)), + ('', ('pair',)), + ('', ('vector',)), + + # gcc extensions. + # Note: std::hash is their hash, ::hash is our hash + ('', ('hash_map', 'hash_multimap',)), + ('', ('hash_set', 'hash_multiset',)), + ('', ('slist',)), + ) + +_RE_PATTERN_STRING = re.compile(r'\bstring\b') + +_re_pattern_algorithm_header = [] +for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', + 'transform'): + # Match max(..., ...), max(..., ...), but not foo->max, foo.max or + # type::max(). + _re_pattern_algorithm_header.append( + (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), + _template, + '')) + +_re_pattern_templates = [] +for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: + for _template in _templates: + _re_pattern_templates.append( + (re.compile(r'(\<|\b)' + _template + r'\s*\<'), + _template + '<>', + _header)) + + +def FilesBelongToSameModule(filename_cc, filename_h): + """Check if these two filenames belong to the same module. + + The concept of a 'module' here is a as follows: + foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the + same 'module' if they are in the same directory. + some/path/public/xyzzy and some/path/internal/xyzzy are also considered + to belong to the same module here. + + If the filename_cc contains a longer path than the filename_h, for example, + '/absolute/path/to/base/sysinfo.cc', and this file would include + 'base/sysinfo.h', this function also produces the prefix needed to open the + header. This is used by the caller of this function to more robustly open the + header file. We don't have access to the real include paths in this context, + so we need this guesswork here. + + Known bugs: tools/base/bar.cc and base/bar.h belong to the same module + according to this implementation. Because of this, this function gives + some false positives. 
This should be sufficiently rare in practice. + + Args: + filename_cc: is the path for the .cc file + filename_h: is the path for the header path + + Returns: + Tuple with a bool and a string: + bool: True if filename_cc and filename_h belong to the same module. + string: the additional prefix needed to open the header file. + """ + + if not filename_cc.endswith('.cc'): + return (False, '') + filename_cc = filename_cc[:-len('.cc')] + if filename_cc.endswith('_unittest'): + filename_cc = filename_cc[:-len('_unittest')] + elif filename_cc.endswith('_test'): + filename_cc = filename_cc[:-len('_test')] + filename_cc = filename_cc.replace('/public/', '/') + filename_cc = filename_cc.replace('/internal/', '/') + + if not filename_h.endswith('.h'): + return (False, '') + filename_h = filename_h[:-len('.h')] + if filename_h.endswith('-inl'): + filename_h = filename_h[:-len('-inl')] + filename_h = filename_h.replace('/public/', '/') + filename_h = filename_h.replace('/internal/', '/') + + files_belong_to_same_module = filename_cc.endswith(filename_h) + common_path = '' + if files_belong_to_same_module: + common_path = filename_cc[:-len(filename_h)] + return files_belong_to_same_module, common_path + + +def UpdateIncludeState(filename, include_state, io=codecs): + """Fill up the include_state with new includes found from the file. + + Args: + filename: the name of the header to read. + include_state: an _IncludeState instance in which the headers are inserted. + io: The io factory to use to read the file. Provided for testability. + + Returns: + True if a header was succesfully added. False otherwise. + """ + headerfile = None + try: + headerfile = io.open(filename, 'r', 'utf8', 'replace') + except IOError: + return False + linenum = 0 + for line in headerfile: + linenum += 1 + clean_line = CleanseComments(line) + match = _RE_PATTERN_INCLUDE.search(clean_line) + if match: + include = match.group(2) + # The value formatting is cute, but not really used right now. + # What matters here is that the key is in include_state. + include_state.setdefault(include, '%s:%d' % (filename, linenum)) + return True + + +def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, + io=codecs): + """Reports for missing stl includes. + + This function will output warnings to make sure you are including the headers + necessary for the stl containers and functions that you use. We only give one + reason to include a header. For example, if you use both equal_to<> and + less<> in a .h file, only one (the latter in the file) of these will be + reported as a reason to include the . + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + include_state: An _IncludeState instance. + error: The function to call with any errors found. + io: The IO factory to use to read the header file. Provided for unittest + injection. + """ + required = {} # A map of header name to linenumber and the template entity. + # Example of required: { '': (1219, 'less<>') } + + for linenum in xrange(clean_lines.NumLines()): + line = clean_lines.elided[linenum] + if not line or line[0] == '#': + continue + + # String is special -- it is a non-templatized type in STL. + matched = _RE_PATTERN_STRING.search(line) + if matched: + # Don't warn about strings in non-STL namespaces: + # (We check only the first match per line; good enough.) 
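+ # For example, 'std::string name;' and 'string name;' both typically count
+ # as uses of the standard string header, while 'mynamespace::string name;'
+ # does not.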
+ prefix = line[:matched.start()] + if prefix.endswith('std::') or not prefix.endswith('::'): + required[''] = (linenum, 'string') + + for pattern, template, header in _re_pattern_algorithm_header: + if pattern.search(line): + required[header] = (linenum, template) + + # The following function is just a speed up, no semantics are changed. + if not '<' in line: # Reduces the cpu time usage by skipping lines. + continue + + for pattern, template, header in _re_pattern_templates: + if pattern.search(line): + required[header] = (linenum, template) + + # The policy is that if you #include something in foo.h you don't need to + # include it again in foo.cc. Here, we will look at possible includes. + # Let's copy the include_state so it is only messed up within this function. + include_state = include_state.copy() + + # Did we find the header for this file (if any) and succesfully load it? + header_found = False + + # Use the absolute path so that matching works properly. + abs_filename = FileInfo(filename).FullName() + + # For Emacs's flymake. + # If cpplint is invoked from Emacs's flymake, a temporary file is generated + # by flymake and that file name might end with '_flymake.cc'. In that case, + # restore original file name here so that the corresponding header file can be + # found. + # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' + # instead of 'foo_flymake.h' + abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) + + # include_state is modified during iteration, so we iterate over a copy of + # the keys. + header_keys = include_state.keys() + for header in header_keys: + (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) + fullpath = common_path + header + if same_module and UpdateIncludeState(fullpath, include_state, io): + header_found = True + + # If we can't find the header file for a .cc, assume it's because we don't + # know where to look. In that case we'll give up as we're not sure they + # didn't include it in the .h file. + # TODO(unknown): Do a better job of finding .h files so we are confident that + # not having the .h file means there isn't one. + if filename.endswith('.cc') and not header_found: + return + + # All the lines have been processed, report the errors found. + for required_header_unstripped in required: + template = required[required_header_unstripped][1] + if required_header_unstripped.strip('<>"') not in include_state: + error(filename, required[required_header_unstripped][0], + 'build/include_what_you_use', 4, + 'Add #include ' + required_header_unstripped + ' for ' + template) + + +_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') + + +def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): + """Check that make_pair's template arguments are deduced. + + G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are + specified explicitly, and such use isn't intended in any case. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. 
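+ For example:
+ my_map.insert(make_pair<int, string>(3, "three"));  // typically flagged
+ my_map.insert(make_pair(3, string("three")));  // accepted, arguments deduced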
+ """ + line = clean_lines.elided[linenum] + match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) + if match: + error(filename, linenum, 'build/explicit_make_pair', + 4, # 4 = high confidence + 'For C++11-compatibility, omit template arguments from make_pair' + ' OR use pair directly OR if appropriate, construct a pair directly') + + +def ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions=[]): + """Processes a single line in the file. + + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + clean_lines: An array of strings, each representing a line of the file, + with comments stripped. + line: Number of line being processed. + include_state: An _IncludeState instance in which the headers are inserted. + function_state: A _FunctionState instance which counts function lines, etc. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. Each function takes 4 + arguments: filename, clean_lines, line, error + """ + raw_lines = clean_lines.raw_lines + ParseNolintSuppressions(filename, raw_lines[line], line, error) + nesting_state.Update(filename, clean_lines, line, error) + if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: + return + CheckForFunctionLengths(filename, clean_lines, line, function_state, error) + CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) + CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) + CheckLanguage(filename, clean_lines, line, file_extension, include_state, + nesting_state, error) + CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) + CheckForNonStandardConstructs(filename, clean_lines, line, + nesting_state, error) + CheckVlogArguments(filename, clean_lines, line, error) + CheckCaffeRandom(filename, clean_lines, line, error) + CheckPosixThreading(filename, clean_lines, line, error) + CheckInvalidIncrement(filename, clean_lines, line, error) + CheckMakePairUsesDeduction(filename, clean_lines, line, error) + for check_fn in extra_check_functions: + check_fn(filename, clean_lines, line, error) + +def ProcessFileData(filename, file_extension, lines, error, + extra_check_functions=[]): + """Performs lint checks and reports any errors to the given error function. + + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + lines: An array of strings, each representing a line of the file, with the + last element being empty if the file is terminated with a newline. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. 
Each function takes 4 + arguments: filename, clean_lines, line, error + """ + lines = (['// marker so line numbers and indices both start at 1'] + lines + + ['// marker so line numbers end in a known way']) + + include_state = _IncludeState() + function_state = _FunctionState() + nesting_state = _NestingState() + + ResetNolintSuppressions() + + CheckForCopyright(filename, lines, error) + + if file_extension == 'h': + CheckForHeaderGuard(filename, lines, error) + + RemoveMultiLineComments(filename, lines, error) + clean_lines = CleansedLines(lines) + for line in xrange(clean_lines.NumLines()): + ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions) + nesting_state.CheckCompletedBlocks(filename, error) + + CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) + + # We check here rather than inside ProcessLine so that we see raw + # lines rather than "cleaned" lines. + CheckForBadCharacters(filename, lines, error) + + CheckForNewlineAtEOF(filename, lines, error) + +def ProcessFile(filename, vlevel, extra_check_functions=[]): + """Does google-lint on a single file. + + Args: + filename: The name of the file to parse. + + vlevel: The level of errors to report. Every error of confidence + >= verbose_level will be reported. 0 is a good default. + + extra_check_functions: An array of additional check functions that will be + run on each source line. Each function takes 4 + arguments: filename, clean_lines, line, error + """ + + _SetVerboseLevel(vlevel) + + try: + # Support the UNIX convention of using "-" for stdin. Note that + # we are not opening the file with universal newline support + # (which codecs doesn't support anyway), so the resulting lines do + # contain trailing '\r' characters if we are reading a file that + # has CRLF endings. + # If after the split a trailing '\r' is present, it is removed + # below. If it is not expected to be present (i.e. os.linesep != + # '\r\n' as in Windows), a warning is issued below if this file + # is processed. + + if filename == '-': + lines = codecs.StreamReaderWriter(sys.stdin, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace').read().split('\n') + else: + lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') + + carriage_return_found = False + # Remove trailing '\r'. + for linenum in range(len(lines)): + if lines[linenum].endswith('\r'): + lines[linenum] = lines[linenum].rstrip('\r') + carriage_return_found = True + + except IOError: + sys.stderr.write( + "Skipping input '%s': Can't open for reading\n" % filename) + return + + # Note, if no dot is found, this will give the entire filename as the ext. + file_extension = filename[filename.rfind('.') + 1:] + + # When reading from stdin, the extension is unknown, so no cpplint tests + # should rely on the extension. + if filename != '-' and file_extension not in _valid_extensions: + sys.stderr.write('Ignoring %s; not a valid file name ' + '(%s)\n' % (filename, ', '.join(_valid_extensions))) + else: + ProcessFileData(filename, file_extension, lines, Error, + extra_check_functions) + if carriage_return_found and os.linesep != '\r\n': + # Use 0 for linenum since outputting only one error for potentially + # several lines. 
+ Error(filename, 0, 'whitespace/newline', 1, + 'One or more unexpected \\r (^M) found;' + 'better to use only a \\n') + + sys.stderr.write('Done processing %s\n' % filename) + + +def PrintUsage(message): + """Prints a brief usage string and exits, optionally with an error message. + + Args: + message: The optional error message. + """ + sys.stderr.write(_USAGE) + if message: + sys.exit('\nFATAL ERROR: ' + message) + else: + sys.exit(1) + + +def PrintCategories(): + """Prints a list of all the error-categories used by error messages. + + These are the categories used to filter messages via --filter. + """ + sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) + sys.exit(0) + + +def ParseArguments(args): + """Parses the command line arguments. + + This may set the output format and verbosity level as side-effects. + + Args: + args: The command line arguments: + + Returns: + The list of filenames to lint. + """ + try: + (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', + 'counting=', + 'filter=', + 'root=', + 'linelength=', + 'extensions=']) + except getopt.GetoptError: + PrintUsage('Invalid arguments.') + + verbosity = _VerboseLevel() + output_format = _OutputFormat() + filters = '' + counting_style = '' + + for (opt, val) in opts: + if opt == '--help': + PrintUsage(None) + elif opt == '--output': + if val not in ('emacs', 'vs7', 'eclipse'): + PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') + output_format = val + elif opt == '--verbose': + verbosity = int(val) + elif opt == '--filter': + filters = val + if not filters: + PrintCategories() + elif opt == '--counting': + if val not in ('total', 'toplevel', 'detailed'): + PrintUsage('Valid counting options are total, toplevel, and detailed') + counting_style = val + elif opt == '--root': + global _root + _root = val + elif opt == '--linelength': + global _line_length + try: + _line_length = int(val) + except ValueError: + PrintUsage('Line length must be digits.') + elif opt == '--extensions': + global _valid_extensions + try: + _valid_extensions = set(val.split(',')) + except ValueError: + PrintUsage('Extensions must be comma seperated list.') + + if not filenames: + PrintUsage('No files were specified.') + + _SetOutputFormat(output_format) + _SetVerboseLevel(verbosity) + _SetFilters(filters) + _SetCountingStyle(counting_style) + + return filenames + + +def main(): + filenames = ParseArguments(sys.argv[1:]) + + # Change stderr to write with replacement characters so we don't die + # if we try to print something containing non-ASCII characters. + sys.stderr = codecs.StreamReaderWriter(sys.stderr, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace') + + _cpplint_state.ResetErrorCounts() + for filename in filenames: + ProcessFile(filename, _cpplint_state.verbose_level) + _cpplint_state.PrintErrorCounts() + + sys.exit(_cpplint_state.error_count > 0) + + +if __name__ == '__main__': + main() diff --git a/modules/dnns_easily_fooled/caffe/scripts/deploy_docs.sh b/modules/dnns_easily_fooled/caffe/scripts/deploy_docs.sh new file mode 100755 index 000000000..b60296139 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/scripts/deploy_docs.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env sh +# Publish/ Pull-request documentation to the gh-pages site. + +# The remote for pushing the docs (defaults to origin). +# This is where you will submit the PR to BVLC:gh-pages from. +REMOTE=${1:-origin} + +echo "Generating docs and pushing to $REMOTE:gh-pages..." 
+echo "To build and view docs when not on master, simply do 'jekyll serve -s docs'." +echo + +REMOTE_URL=`git config --get remote.${REMOTE}.url` +BRANCH=`git rev-parse --abbrev-ref HEAD` +MSG=`git log --oneline -1` + +if [[ $BRANCH = 'master' ]]; then + # Find the docs dir, no matter where the script is called + DIR="$( cd "$(dirname "$0")" ; pwd -P )" + DOCS_SITE_DIR=$DIR/../docs/_site + + # Make sure that docs/_site tracks remote:gh-pages. + # If not, then we make a new repo and check out just that branch. + mkdir -p $DOCS_SITE_DIR + cd $DOCS_SITE_DIR + SITE_REMOTE_URL=`git config --get remote.${REMOTE}.url` + SITE_BRANCH=`git rev-parse --abbrev-ref HEAD` + + echo $SITE_REMOTE_URL + echo $SITE_BRANCH + echo `pwd` + + if [[ ( $SITE_REMOTE_URL = $REMOTE_URL ) && ( $SITE_BRANCH = 'gh-pages' ) ]]; then + echo "Confirmed that docs/_site has same remote as main repo, and is on gh-pages." + else + echo "Checking out $REMOTE:gh-pages into docs/_site (will take a little time)." + git init . + git remote add -t gh-pages -f $REMOTE $REMOTE_URL + git checkout gh-pages + fi + + echo "Building the site into docs/_site, and committing the changes." + jekyll build -s .. -d . + git add --all . + git commit -m "$MSG" + git push $REMOTE gh-pages + + echo "All done!" + cd ../.. +else echo "You must run this deployment script from the 'master' branch." +fi diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/blob.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/blob.cpp new file mode 100644 index 000000000..e603712fd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/blob.cpp @@ -0,0 +1,214 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void Blob::Reshape(const int num, const int channels, const int height, + const int width) { + CHECK_GE(num, 0); + CHECK_GE(channels, 0); + CHECK_GE(height, 0); + CHECK_GE(width, 0); + num_ = num; + channels_ = channels; + height_ = height; + width_ = width; + count_ = num_ * channels_ * height_ * width_; + if (count_) { + data_.reset(new SyncedMemory(count_ * sizeof(Dtype))); + diff_.reset(new SyncedMemory(count_ * sizeof(Dtype))); + } else { + data_.reset(reinterpret_cast(NULL)); + diff_.reset(reinterpret_cast(NULL)); + } +} + +template +void Blob::ReshapeLike(const Blob& other) { + Reshape(other.num(), other.channels(), other.height(), other.width()); +} + +template +Blob::Blob(const int num, const int channels, const int height, + const int width) { + Reshape(num, channels, height, width); +} + +template +const Dtype* Blob::cpu_data() const { + CHECK(data_); + return (const Dtype*)data_->cpu_data(); +} + +template +void Blob::set_cpu_data(Dtype* data) { + CHECK(data); + data_->set_cpu_data(data); +} + +template +const Dtype* Blob::gpu_data() const { + CHECK(data_); + return (const Dtype*)data_->gpu_data(); +} + +template +const Dtype* Blob::cpu_diff() const { + CHECK(diff_); + return (const Dtype*)diff_->cpu_data(); +} + +template +const Dtype* Blob::gpu_diff() const { + CHECK(diff_); + return (const Dtype*)diff_->gpu_data(); +} + +template +Dtype* Blob::mutable_cpu_data() { + CHECK(data_); + return reinterpret_cast(data_->mutable_cpu_data()); +} + +template +Dtype* Blob::mutable_gpu_data() { + CHECK(data_); + return reinterpret_cast(data_->mutable_gpu_data()); +} + +template +Dtype* Blob::mutable_cpu_diff() { + CHECK(diff_); + return 
reinterpret_cast(diff_->mutable_cpu_data()); +} + +template +Dtype* Blob::mutable_gpu_diff() { + CHECK(diff_); + return reinterpret_cast(diff_->mutable_gpu_data()); +} + +template +void Blob::ShareData(const Blob& other) { + CHECK_EQ(count_, other.count()); + data_ = other.data(); +} + +template +void Blob::ShareDiff(const Blob& other) { + CHECK_EQ(count_, other.count()); + diff_ = other.diff(); +} + +// The "update" method is used for parameter blobs in a Net, which are stored +// as Blob or Blob -- hence we do not define it for +// Blob or Blob. +template <> void Blob::Update() { NOT_IMPLEMENTED; } +template <> void Blob::Update() { NOT_IMPLEMENTED; } + +template +void Blob::Update() { + // We will perform update based on where the data is located. + switch (data_->head()) { + case SyncedMemory::HEAD_AT_CPU: + // perform computation on CPU + caffe_axpy(count_, Dtype(-1), + reinterpret_cast(diff_->cpu_data()), + reinterpret_cast(data_->mutable_cpu_data())); + break; + case SyncedMemory::HEAD_AT_GPU: + case SyncedMemory::SYNCED: + // perform computation on GPU + caffe_gpu_axpy(count_, Dtype(-1), + reinterpret_cast(diff_->gpu_data()), + reinterpret_cast(data_->mutable_gpu_data())); + break; + default: + LOG(FATAL) << "Syncedmem not initialized."; + } +} + +template +void Blob::CopyFrom(const Blob& source, bool copy_diff, bool reshape) { + if (num_ != source.num() || channels_ != source.channels() || + height_ != source.height() || width_ != source.width()) { + if (reshape) { + Reshape(source.num(), source.channels(), source.height(), source.width()); + } else { + LOG(FATAL) << "Trying to copy blobs of different sizes."; + } + } + switch (Caffe::mode()) { + case Caffe::GPU: + if (copy_diff) { + CUDA_CHECK(cudaMemcpy(diff_->mutable_gpu_data(), source.gpu_diff(), + sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice)); + } else { + CUDA_CHECK(cudaMemcpy(data_->mutable_gpu_data(), source.gpu_data(), + sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice)); + } + break; + case Caffe::CPU: + if (copy_diff) { + memcpy(diff_->mutable_cpu_data(), source.cpu_diff(), + sizeof(Dtype) * count_); + } else { + memcpy(data_->mutable_cpu_data(), source.cpu_data(), + sizeof(Dtype) * count_); + } + break; + default: + LOG(FATAL) << "Unknown caffe mode."; + } +} + +template +void Blob::FromProto(const BlobProto& proto) { + Reshape(proto.num(), proto.channels(), proto.height(), proto.width()); + // copy data + Dtype* data_vec = mutable_cpu_data(); + for (int i = 0; i < count_; ++i) { + data_vec[i] = proto.data(i); + } + if (proto.diff_size() > 0) { + Dtype* diff_vec = mutable_cpu_diff(); + for (int i = 0; i < count_; ++i) { + diff_vec[i] = proto.diff(i); + } + } +} + +template +void Blob::ToProto(BlobProto* proto, bool write_diff) const { + proto->set_num(num_); + proto->set_channels(channels_); + proto->set_height(height_); + proto->set_width(width_); + proto->clear_data(); + proto->clear_diff(); + const Dtype* data_vec = cpu_data(); + for (int i = 0; i < count_; ++i) { + proto->add_data(data_vec[i]); + } + if (write_diff) { + const Dtype* diff_vec = cpu_diff(); + for (int i = 0; i < count_; ++i) { + proto->add_diff(diff_vec[i]); + } + } +} + +INSTANTIATE_CLASS(Blob); +template class Blob; +template class Blob; + +} // namespace caffe + diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/common.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/common.cpp new file mode 100644 index 000000000..6eadfc791 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/common.cpp @@ -0,0 +1,198 @@ +// Copyright 2014 
BVLC and contributors. + +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +shared_ptr Caffe::singleton_; + + +// curand seeding +int64_t cluster_seedgen(void) { + int64_t s, seed, pid; + pid = getpid(); + s = time(NULL); + seed = abs(((s * 181) * ((pid - 83) * 359)) % 104729); + return seed; +} + + +Caffe::Caffe() + : mode_(Caffe::CPU), phase_(Caffe::TRAIN), cublas_handle_(NULL), + curand_generator_(NULL), + random_generator_() { + // Try to create a cublas handler, and report an error if failed (but we will + // keep the program running as one might just want to run CPU code). + if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { + LOG(ERROR) << "Cannot create Cublas handle. Cublas won't be available."; + } + // Try to create a curand handler. + if (curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT) + != CURAND_STATUS_SUCCESS || + curandSetPseudoRandomGeneratorSeed(curand_generator_, cluster_seedgen()) + != CURAND_STATUS_SUCCESS) { + LOG(ERROR) << "Cannot create Curand generator. Curand won't be available."; + } +} + +Caffe::~Caffe() { + if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); + if (curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(curand_generator_)); + } +} + +void Caffe::set_random_seed(const unsigned int seed) { + // Curand seed + // Yangqing's note: simply setting the generator seed does not seem to + // work on the tesla K20s, so I wrote the ugly reset thing below. + if (Get().curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(curand_generator())); + CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_, + CURAND_RNG_PSEUDO_DEFAULT)); + CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(), + seed)); + } else { + LOG(ERROR) << "Curand not available. Skipping setting the curand seed."; + } + // RNG seed + Get().random_generator_.reset(new RNG(seed)); +} + +void Caffe::SetDevice(const int device_id) { + int current_device; + CUDA_CHECK(cudaGetDevice(¤t_device)); + if (current_device == device_id) { + return; + } + // The call to cudaSetDevice must come before any calls to Get, which + // may perform initialization using the GPU. 
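+  // Switching devices invalidates the existing cuBLAS/cuRAND handles, so any
+  // present handles are destroyed and re-created on the new device below,
+  // with curand re-seeded via cluster_seedgen().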
+ CUDA_CHECK(cudaSetDevice(device_id)); + if (Get().cublas_handle_) CUBLAS_CHECK(cublasDestroy(Get().cublas_handle_)); + if (Get().curand_generator_) { + CURAND_CHECK(curandDestroyGenerator(Get().curand_generator_)); + } + CUBLAS_CHECK(cublasCreate(&Get().cublas_handle_)); + CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_, + CURAND_RNG_PSEUDO_DEFAULT)); + CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(Get().curand_generator_, + cluster_seedgen())); +} + +void Caffe::DeviceQuery() { + cudaDeviceProp prop; + int device; + if (cudaSuccess != cudaGetDevice(&device)) { + printf("No cuda device present.\n"); + return; + } + CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); + printf("Device id: %d\n", device); + printf("Major revision number: %d\n", prop.major); + printf("Minor revision number: %d\n", prop.minor); + printf("Name: %s\n", prop.name); + printf("Total global memory: %lu\n", prop.totalGlobalMem); + printf("Total shared memory per block: %lu\n", prop.sharedMemPerBlock); + printf("Total registers per block: %d\n", prop.regsPerBlock); + printf("Warp size: %d\n", prop.warpSize); + printf("Maximum memory pitch: %lu\n", prop.memPitch); + printf("Maximum threads per block: %d\n", prop.maxThreadsPerBlock); + printf("Maximum dimension of block: %d, %d, %d\n", + prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); + printf("Maximum dimension of grid: %d, %d, %d\n", + prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); + printf("Clock rate: %d\n", prop.clockRate); + printf("Total constant memory: %lu\n", prop.totalConstMem); + printf("Texture alignment: %lu\n", prop.textureAlignment); + printf("Concurrent copy and execution: %s\n", + (prop.deviceOverlap ? "Yes" : "No")); + printf("Number of multiprocessors: %d\n", prop.multiProcessorCount); + printf("Kernel execution timeout: %s\n", + (prop.kernelExecTimeoutEnabled ? 
"Yes" : "No")); + return; +} + + +class Caffe::RNG::Generator { + public: + Generator() : rng_(new caffe::rng_t(cluster_seedgen())) {} + explicit Generator(unsigned int seed) : rng_(new caffe::rng_t(seed)) {} + caffe::rng_t* rng() { return rng_.get(); } + private: + shared_ptr rng_; +}; + +Caffe::RNG::RNG() : generator_(new Generator()) { } + +Caffe::RNG::RNG(unsigned int seed) : generator_(new Generator(seed)) { } + +Caffe::RNG& Caffe::RNG::operator=(const RNG& other) { + generator_.reset(other.generator_.get()); + return *this; +} + +void* Caffe::RNG::generator() { + return static_cast(generator_->rng()); +} + +const char* cublasGetErrorString(cublasStatus_t error) { + switch (error) { + case CUBLAS_STATUS_SUCCESS: + return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: + return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: + return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: + return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: + return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: + return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: + return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: + return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_NOT_SUPPORTED: + return "CUBLAS_STATUS_NOT_SUPPORTED"; + } + return "Unknown cublas status"; +} + +const char* curandGetErrorString(curandStatus_t error) { + switch (error) { + case CURAND_STATUS_SUCCESS: + return "CURAND_STATUS_SUCCESS"; + case CURAND_STATUS_VERSION_MISMATCH: + return "CURAND_STATUS_VERSION_MISMATCH"; + case CURAND_STATUS_NOT_INITIALIZED: + return "CURAND_STATUS_NOT_INITIALIZED"; + case CURAND_STATUS_ALLOCATION_FAILED: + return "CURAND_STATUS_ALLOCATION_FAILED"; + case CURAND_STATUS_TYPE_ERROR: + return "CURAND_STATUS_TYPE_ERROR"; + case CURAND_STATUS_OUT_OF_RANGE: + return "CURAND_STATUS_OUT_OF_RANGE"; + case CURAND_STATUS_LENGTH_NOT_MULTIPLE: + return "CURAND_STATUS_LENGTH_NOT_MULTIPLE"; + case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: + return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED"; + case CURAND_STATUS_LAUNCH_FAILURE: + return "CURAND_STATUS_LAUNCH_FAILURE"; + case CURAND_STATUS_PREEXISTING_FAILURE: + return "CURAND_STATUS_PREEXISTING_FAILURE"; + case CURAND_STATUS_INITIALIZATION_FAILED: + return "CURAND_STATUS_INITIALIZATION_FAILED"; + case CURAND_STATUS_ARCH_MISMATCH: + return "CURAND_STATUS_ARCH_MISMATCH"; + case CURAND_STATUS_INTERNAL_ERROR: + return "CURAND_STATUS_INTERNAL_ERROR"; + } + return "Unknown curand status"; +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layer_factory.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layer_factory.cpp new file mode 100644 index 000000000..d6e506dfb --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layer_factory.cpp @@ -0,0 +1,101 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_LAYER_FACTORY_HPP_ +#define CAFFE_LAYER_FACTORY_HPP_ + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::string; + +namespace caffe { + + +// A function to get a specific layer from the specification given in +// LayerParameter. Ideally this would be replaced by a factory pattern, +// but we will leave it this way for now. 
+template +Layer* GetLayer(const LayerParameter& param) { + const string& name = param.name(); + const LayerParameter_LayerType& type = param.type(); + switch (type) { + case LayerParameter_LayerType_ACCURACY: + return new AccuracyLayer(param); + case LayerParameter_LayerType_ARGMAX: + return new ArgMaxLayer(param); + case LayerParameter_LayerType_BNLL: + return new BNLLLayer(param); + case LayerParameter_LayerType_CONCAT: + return new ConcatLayer(param); + case LayerParameter_LayerType_CONVOLUTION: + return new ConvolutionLayer(param); + case LayerParameter_LayerType_DATA: + return new DataLayer(param); + case LayerParameter_LayerType_DROPOUT: + return new DropoutLayer(param); + case LayerParameter_LayerType_DUMMY_DATA: + return new DummyDataLayer(param); + case LayerParameter_LayerType_EUCLIDEAN_LOSS: + return new EuclideanLossLayer(param); + case LayerParameter_LayerType_ELTWISE: + return new EltwiseLayer(param); + case LayerParameter_LayerType_FLATTEN: + return new FlattenLayer(param); + case LayerParameter_LayerType_HDF5_DATA: + return new HDF5DataLayer(param); + case LayerParameter_LayerType_HDF5_OUTPUT: + return new HDF5OutputLayer(param); + case LayerParameter_LayerType_HINGE_LOSS: + return new HingeLossLayer(param); + case LayerParameter_LayerType_IMAGE_DATA: + return new ImageDataLayer(param); + case LayerParameter_LayerType_IM2COL: + return new Im2colLayer(param); + case LayerParameter_LayerType_INFOGAIN_LOSS: + return new InfogainLossLayer(param); + case LayerParameter_LayerType_INNER_PRODUCT: + return new InnerProductLayer(param); + case LayerParameter_LayerType_LRN: + return new LRNLayer(param); + case LayerParameter_LayerType_MEMORY_DATA: + return new MemoryDataLayer(param); + case LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS: + return new MultinomialLogisticLossLayer(param); + case LayerParameter_LayerType_POOLING: + return new PoolingLayer(param); + case LayerParameter_LayerType_POWER: + return new PowerLayer(param); + case LayerParameter_LayerType_RELU: + return new ReLULayer(param); + case LayerParameter_LayerType_SIGMOID: + return new SigmoidLayer(param); + case LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS: + return new SigmoidCrossEntropyLossLayer(param); + case LayerParameter_LayerType_SOFTMAX: + return new SoftmaxLayer(param); + case LayerParameter_LayerType_SOFTMAX_LOSS: + return new SoftmaxWithLossLayer(param); + case LayerParameter_LayerType_SPLIT: + return new SplitLayer(param); + case LayerParameter_LayerType_TANH: + return new TanHLayer(param); + case LayerParameter_LayerType_WINDOW_DATA: + return new WindowDataLayer(param); + case LayerParameter_LayerType_NONE: + LOG(FATAL) << "Layer " << name << " has unspecified type."; + default: + LOG(FATAL) << "Layer " << name << " has unknown type " << type; + } + // just to suppress old compiler warnings. + return (Layer*)(NULL); +} + +template Layer* GetLayer(const LayerParameter& param); +template Layer* GetLayer(const LayerParameter& param); + +} // namespace caffe + +#endif // CAFFE_LAYER_FACTORY_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/accuracy_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/accuracy_layer.cpp new file mode 100644 index 000000000..fbc943eaf --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/accuracy_layer.cpp @@ -0,0 +1,64 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +void AccuracyLayer::SetUp( + const vector*>& bottom, vector*>* top) { + Layer::SetUp(bottom, top); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()) + << "The data and label should have the same number."; + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + (*top)[0]->Reshape(1, 2, 1, 1); +} + +template +Dtype AccuracyLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + Dtype accuracy = 0; + Dtype logprob = 0; + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + for (int i = 0; i < num; ++i) { + // Accuracy + Dtype maxval = -FLT_MAX; + int max_id = 0; + for (int j = 0; j < dim; ++j) { + if (bottom_data[i * dim + j] > maxval) { + maxval = bottom_data[i * dim + j]; + max_id = j; + } + } + if (max_id == static_cast(bottom_label[i])) { + ++accuracy; + } + Dtype prob = max(bottom_data[i * dim + static_cast(bottom_label[i])], + Dtype(kLOG_THRESHOLD)); + logprob -= log(prob); + } + // LOG(INFO) << "Accuracy: " << accuracy; + (*top)[0]->mutable_cpu_data()[0] = accuracy / num; + (*top)[0]->mutable_cpu_data()[1] = logprob / num; + // Accuracy layer should not be used as a loss function. + return Dtype(0); +} + +INSTANTIATE_CLASS(AccuracyLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/argmax_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/argmax_layer.cpp new file mode 100644 index 000000000..cc31c0f52 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/argmax_layer.cpp @@ -0,0 +1,55 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + + +namespace caffe { + +template +void ArgMaxLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + out_max_val_ = this->layer_param_.argmax_param().out_max_val(); + if (out_max_val_) { + // Produces max_ind and max_val + (*top)[0]->Reshape(bottom[0]->num(), 2, 1, 1); + } else { + // Produces only max_ind + (*top)[0]->Reshape(bottom[0]->num(), 1, 1, 1); + } +} + +template +Dtype ArgMaxLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + for (int i = 0; i < num; ++i) { + Dtype max_val = -FLT_MAX; + int max_ind = 0; + for (int j = 0; j < dim; ++j) { + if (bottom_data[i * dim + j] > max_val) { + max_val = bottom_data[i * dim + j]; + max_ind = j; + } + } + if (out_max_val_) { + top_data[i * 2] = max_ind; + top_data[i * 2 + 1] = max_val; + } else { + top_data[i] = max_ind; + } + } + return Dtype(0); +} + +INSTANTIATE_CLASS(ArgMaxLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cpp new file mode 100644 index 000000000..d08adc49e --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cpp @@ -0,0 +1,50 @@ +// Copyright 2014 BVLC and contributors. 
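+// BNLLLayer computes f(x) = log(1 + exp(x)), written in a numerically stable
+// form that depends on the sign of x. The backward pass multiplies the top
+// diff by the logistic sigmoid exp(x) / (exp(x) + 1), with x clipped at
+// kBNLL_THRESHOLD to avoid overflow in exp().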
+ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::min; + +namespace caffe { + +const float kBNLL_THRESHOLD = 50.; + +template +Dtype BNLLLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = bottom_data[i] > 0 ? + bottom_data[i] + log(1. + exp(-bottom_data[i])) : + log(1. + exp(bottom_data[i])); + } + return Dtype(0); +} + +template +void BNLLLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const int count = (*bottom)[0]->count(); + Dtype expval; + for (int i = 0; i < count; ++i) { + expval = exp(min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); + bottom_diff[i] = top_diff[i] * expval / (expval + 1.); + } + } +} + + +INSTANTIATE_CLASS(BNLLLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cu new file mode 100644 index 000000000..75bea00e9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/bnll_layer.cu @@ -0,0 +1,65 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +const float kBNLL_THRESHOLD = 50.; + +template +__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : + log(1. + exp(in[index])); + } +} + +template +Dtype BNLLLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + return Dtype(0); +} + +template +__global__ void BNLLBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +template +void BNLLLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLBackward<<>>( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_CLASS(BNLLLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cpp new file mode 100644 index 000000000..4541ee742 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cpp @@ -0,0 +1,101 @@ +// Copyright 2014 BVLC and contributors. 
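+// ConcatLayer concatenates its bottom blobs along num (concat_dim 0) or
+// channels (concat_dim 1); the remaining dimensions are taken from the first
+// bottom blob, and the total count is checked against the top blob in SetUp.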
+ +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void ConcatLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + concat_dim_ = this->layer_param_.concat_param().concat_dim(); + CHECK_GE(concat_dim_, 0) << + "concat_dim should be >= 0"; + CHECK_LE(concat_dim_, 1) << + "For now concat_dim <=1, it can only concat num and channels"; + + // Initialize with the first blob. + count_ = bottom[0]->count(); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + for (int i = 1; i < bottom.size(); ++i) { + count_ += bottom[i]->count(); + if (concat_dim_== 0) { + num_ += bottom[i]->num(); + } else if (concat_dim_ == 1) { + channels_ += bottom[i]->channels(); + } else if (concat_dim_ == 2) { + height_ += bottom[i]->height(); + } else if (concat_dim_ == 3) { + width_ += bottom[i]->width(); + } + } + (*top)[0]->Reshape(num_, channels_, height_, width_); + CHECK_EQ(count_, (*top)[0]->count()); +} + +template +Dtype ConcatLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + if (concat_dim_== 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + int num_elem = bottom[i]->count(); + caffe_copy(num_elem, bottom_data, top_data+(*top)[0]->offset(offset_num)); + offset_num += bottom[i]->num(); + } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->cpu_data(); + int num_elem = + bottom[i]->channels()*bottom[i]->height()*bottom[i]->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, bottom_data+bottom[i]->offset(n), + top_data+(*top)[0]->offset(n, offset_channel)); + } + offset_channel += bottom[i]->channels(); + } // concat_dim_ is guaranteed to be 0 or 1 by SetUp. + } + return Dtype(0.); +} + +template +void ConcatLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom->size(); ++i) { + Blob* blob = (*bottom)[i]; + Dtype* bottom_diff = blob->mutable_cpu_diff(); + caffe_copy(blob->count(), + top_diff+top[0]->offset(offset_num), bottom_diff); + offset_num += blob->num(); + } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom->size(); ++i) { + Blob* blob = (*bottom)[i]; + Dtype* bottom_diff = blob->mutable_cpu_diff(); + int num_elem = blob->channels()*blob->height()*blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_copy(num_elem, top_diff+top[0]->offset(n, offset_channel), + bottom_diff+blob->offset(n)); + } + offset_channel += blob->channels(); + } + } // concat_dim_ is guaranteed to be 0 or 1 by SetUp. +} + +INSTANTIATE_CLASS(ConcatLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cu new file mode 100644 index 000000000..2820bf0df --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/concat_layer.cu @@ -0,0 +1,75 @@ +// Copyright 2014 BVLC and contributors. 
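+// GPU implementation of ConcatLayer: mirrors the CPU version, copying each
+// bottom blob into its offset within the top blob via caffe_gpu_copy.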
+ +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype ConcatLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + caffe_gpu_copy(bottom[i]->count(), bottom_data, + top_data + (*top)[0]->offset(offset_num)); + offset_num += bottom[i]->num(); + } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + int num_elem = + bottom[i]->channels() * bottom[i]->height() * bottom[i]->width(); + for (int n = 0; n < num_; ++n) { + caffe_gpu_copy(num_elem, bottom_data+bottom[i]->offset(n), + top_data + (*top)[0]->offset(n, offset_channel)); + } + offset_channel += bottom[i]->channels(); + } + } else { + LOG(FATAL) << "concat_dim along dim" << concat_dim_ << + " not implemented yet"; + } + return Dtype(0.); +} + +template +void ConcatLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + if (concat_dim_ == 0) { + int offset_num = 0; + for (int i = 0; i < bottom->size(); ++i) { + Blob* blob = (*bottom)[i]; + Dtype* bottom_diff = blob->mutable_gpu_diff(); + caffe_gpu_copy(blob->count(), + top_diff + top[0]->offset(offset_num), bottom_diff); + offset_num += blob->num(); + } + } else if (concat_dim_ == 1) { + int offset_channel = 0; + for (int i = 0; i < bottom->size(); ++i) { + Blob* blob = (*bottom)[i]; + Dtype* bottom_diff = blob->mutable_gpu_diff(); + int num_elem = blob->channels()*blob->height()*blob->width(); + for (int n = 0; n < num_; ++n) { + caffe_gpu_copy(num_elem, top_diff + top[0]->offset(n, offset_channel), + bottom_diff + blob->offset(n)); + } + offset_channel += blob->channels(); + } + } else { + LOG(FATAL) << "concat_dim along dim" << concat_dim_ << + " not implemented yet"; + } +} + +INSTANTIATE_CLASS(ConcatLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cpp new file mode 100644 index 000000000..880b83afc --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cpp @@ -0,0 +1,167 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void ConvolutionLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + kernel_size_ = this->layer_param_.convolution_param().kernel_size(); + stride_ = this->layer_param_.convolution_param().stride(); + group_ = this->layer_param_.convolution_param().group(); + pad_ = this->layer_param_.convolution_param().pad(); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_output_ = this->layer_param_.convolution_param().num_output(); + CHECK_GT(num_output_, 0); + CHECK_EQ(channels_ % group_, 0); + // The im2col result buffer would only hold one image at a time to avoid + // overly large memory usage. 
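+  // Spatial output size for a square kernel:
+  //   out = (in + 2 * pad - kernel_size) / stride + 1
+  // e.g. a 227x227 input with kernel_size 11, pad 0, stride 4 yields
+  // (227 - 11) / 4 + 1 = 55.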
+ int height_out = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; + int width_out = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; + col_buffer_.Reshape( + 1, channels_ * kernel_size_ * kernel_size_, height_out, width_out); + // Set the parameters + CHECK_EQ(num_output_ % group_, 0) + << "Number of output should be multiples of group."; + bias_term_ = this->layer_param_.convolution_param().bias_term(); + // Figure out the dimensions for individual gemms. + M_ = num_output_ / group_; + K_ = channels_ * kernel_size_ * kernel_size_ / group_; + N_ = height_out * width_out; + (*top)[0]->Reshape(bottom[0]->num(), num_output_, height_out, width_out); + // Check if we need to set up the weights + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Intialize the weight + this->blobs_[0].reset(new Blob( + num_output_, channels_ / group_, kernel_size_, kernel_size_)); + // fill the weights + shared_ptr > weight_filler(GetFiller( + this->layer_param_.convolution_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, intiialize and fill the bias term + if (bias_term_) { + this->blobs_[1].reset(new Blob(1, 1, 1, num_output_)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.convolution_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } + // Set up the bias filler + if (bias_term_) { + bias_multiplier_.reset(new SyncedMemory(N_ * sizeof(Dtype))); + Dtype* bias_multiplier_data = + reinterpret_cast(bias_multiplier_->mutable_cpu_data()); + for (int i = 0; i < N_; ++i) { + bias_multiplier_data[i] = 1.; + } + } +} + + +template +Dtype ConvolutionLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* col_data = col_buffer_.mutable_cpu_data(); + const Dtype* weight = this->blobs_[0]->cpu_data(); + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + for (int n = 0; n < num_; ++n) { + // First, im2col + im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, col_data); + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, + (Dtype)1., weight + weight_offset * g, col_data + col_offset * g, + (Dtype)0., top_data + (*top)[0]->offset(n) + top_offset * g); + } + // third, add bias + if (bias_term_) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + N_, 1, (Dtype)1., this->blobs_[1]->cpu_data(), + reinterpret_cast(bias_multiplier_->cpu_data()), + (Dtype)1., top_data + (*top)[0]->offset(n)); + } + } + return Dtype(0.); +} + +template +void ConvolutionLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* weight = this->blobs_[0]->cpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* col_data = col_buffer_.mutable_cpu_data(); + Dtype* col_diff = col_buffer_.mutable_cpu_diff(); + // bias gradient if necessary + Dtype* bias_diff = NULL; + + if (bias_term_) { + bias_diff = this->blobs_[1]->mutable_cpu_diff(); + memset(bias_diff, 0, sizeof(Dtype) * this->blobs_[1]->count()); + for (int n = 0; n < 
num_; ++n) { + caffe_cpu_gemv(CblasNoTrans, num_output_, N_, + 1., top_diff + top[0]->offset(n), + reinterpret_cast(bias_multiplier_->cpu_data()), 1., + bias_diff); + } + } + + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + memset(weight_diff, 0, sizeof(Dtype) * this->blobs_[0]->count()); + for (int n = 0; n < num_; ++n) { + // since we saved memory in the forward pass by not storing all col data, + // we will need to recompute them. + im2col_cpu(bottom_data + (*bottom)[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, col_data); + // gradient w.r.t. weight. Note that we will accumulate diffs. + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, K_, N_, + (Dtype)1., top_diff + top[0]->offset(n) + top_offset * g, + col_data + col_offset * g, (Dtype)1., + weight_diff + weight_offset * g); + } + // gradient w.r.t. bottom data, if necessary + if (propagate_down) { + for (int g = 0; g < group_; ++g) { + caffe_cpu_gemm(CblasTrans, CblasNoTrans, K_, N_, M_, + (Dtype)1., weight + weight_offset * g, + top_diff + top[0]->offset(n) + top_offset * g, + (Dtype)0., col_diff + col_offset * g); + } + // col2im back to the data + col2im_cpu(col_diff, channels_, height_, width_, kernel_size_, pad_, + stride_, bottom_diff + (*bottom)[0]->offset(n)); + } + } +} + +INSTANTIATE_CLASS(ConvolutionLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cu new file mode 100644 index 000000000..51f5d1598 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/conv_layer.cu @@ -0,0 +1,104 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype ConvolutionLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* col_data = col_buffer_.mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + for (int n = 0; n < num_; ++n) { + // First, im2col + im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, col_data); + // Second, innerproduct with groups + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, + (Dtype)1., weight + weight_offset * g, col_data + col_offset * g, + (Dtype)0., top_data + (*top)[0]->offset(n) + top_offset * g); + } + // third, add bias + if (bias_term_) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, + N_, 1, (Dtype)1., this->blobs_[1]->gpu_data(), + reinterpret_cast(bias_multiplier_->gpu_data()), + (Dtype)1., top_data + (*top)[0]->offset(n)); + } + } + return Dtype(0.); +} + +template +void ConvolutionLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* col_data = col_buffer_.mutable_gpu_data(); + Dtype* col_diff = 
col_buffer_.mutable_gpu_diff(); + // bias gradient if necessary + Dtype* bias_diff = NULL; + + if (bias_term_) { + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + CUDA_CHECK(cudaMemset(bias_diff, 0, + sizeof(Dtype) * this->blobs_[1]->count())); + for (int n = 0; n < num_; ++n) { + caffe_gpu_gemv(CblasNoTrans, num_output_, N_, + 1., top_diff + top[0]->offset(n), + reinterpret_cast(bias_multiplier_->gpu_data()), + 1., bias_diff); + } + } + + int weight_offset = M_ * K_; + int col_offset = K_ * N_; + int top_offset = M_ * N_; + CUDA_CHECK(cudaMemset(weight_diff, 0, + sizeof(Dtype) * this->blobs_[0]->count())); + for (int n = 0; n < num_; ++n) { + // since we saved memory in the forward pass by not storing all col data, + // we will need to recompute them. + im2col_gpu(bottom_data + (*bottom)[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, col_data); + // gradient w.r.t. weight. Note that we will accumulate diffs. + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, K_, N_, + (Dtype)1., top_diff + top[0]->offset(n) + top_offset * g, + col_data + col_offset * g, (Dtype)1., + weight_diff + weight_offset * g); + } + // gradient w.r.t. bottom data, if necessary + if (propagate_down) { + for (int g = 0; g < group_; ++g) { + caffe_gpu_gemm(CblasTrans, CblasNoTrans, K_, N_, M_, + (Dtype)1., weight + weight_offset * g, + top_diff + top[0]->offset(n) + top_offset * g, + (Dtype)0., col_diff + col_offset * g); + } + // col2im back to the data + col2im_gpu(col_diff, channels_, height_, width_, kernel_size_, pad_, + stride_, bottom_diff + (*bottom)[0]->offset(n)); + } + } +} + + +INSTANTIATE_CLASS(ConvolutionLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cpp new file mode 100644 index 000000000..f12ae1c12 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cpp @@ -0,0 +1,367 @@ +// Copyright 2014 BVLC and contributors. 
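+// DataLayer streams (datum, label) records from a LevelDB or LMDB database.
+// A background pthread fills a prefetch buffer (with optional mean
+// subtraction, scaling, random cropping and mirroring) while the network
+// consumes the previously fetched batch.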
+ +#include +#include +#include + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::string; + +namespace caffe { + +template +void* DataLayerPrefetch(void* layer_pointer) { + CHECK(layer_pointer); + DataLayer* layer = static_cast*>(layer_pointer); + CHECK(layer); + Datum datum; + CHECK(layer->prefetch_data_); + Dtype* top_data = layer->prefetch_data_->mutable_cpu_data(); + Dtype* top_label; + if (layer->output_labels_) { + top_label = layer->prefetch_label_->mutable_cpu_data(); + } + const Dtype scale = layer->layer_param_.data_param().scale(); + const int batch_size = layer->layer_param_.data_param().batch_size(); + const int crop_size = layer->layer_param_.data_param().crop_size(); + const bool mirror = layer->layer_param_.data_param().mirror(); + + if (mirror && crop_size == 0) { + LOG(FATAL) << "Current implementation requires mirror and crop_size to be " + << "set at the same time."; + } + // datum scales + const int channels = layer->datum_channels_; + const int height = layer->datum_height_; + const int width = layer->datum_width_; + const int size = layer->datum_size_; + const Dtype* mean = layer->data_mean_.cpu_data(); + for (int item_id = 0; item_id < batch_size; ++item_id) { + // get a blob + switch (layer->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + CHECK(layer->iter_); + CHECK(layer->iter_->Valid()); + datum.ParseFromString(layer->iter_->value().ToString()); + break; + case DataParameter_DB_LMDB: + CHECK_EQ(mdb_cursor_get(layer->mdb_cursor_, &layer->mdb_key_, + &layer->mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS); + datum.ParseFromArray(layer->mdb_value_.mv_data, + layer->mdb_value_.mv_size); + break; + default: + LOG(FATAL) << "Unknown database backend"; + } + + const string& data = datum.data(); + if (crop_size) { + CHECK(data.size()) << "Image cropping only support uint8 data"; + int h_off, w_off; + // We only do random crop when we do training. 
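+      // Training draws a random crop offset in [0, height - crop_size) and
+      // [0, width - crop_size); testing uses the deterministic center crop
+      // computed in the else branch.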
+ if (layer->phase_ == Caffe::TRAIN) { + h_off = layer->PrefetchRand() % (height - crop_size); + w_off = layer->PrefetchRand() % (width - crop_size); + } else { + h_off = (height - crop_size) / 2; + w_off = (width - crop_size) / 2; + } + if (mirror && layer->PrefetchRand() % 2) { + // Copy mirrored version + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + int top_index = ((item_id * channels + c) * crop_size + h) + * crop_size + (crop_size - 1 - w); + int data_index = (c * height + h + h_off) * width + w + w_off; + Dtype datum_element = + static_cast(static_cast(data[data_index])); + top_data[top_index] = (datum_element - mean[data_index]) * scale; + } + } + } + } else { + // Normal copy + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + int top_index = ((item_id * channels + c) * crop_size + h) + * crop_size + w; + int data_index = (c * height + h + h_off) * width + w + w_off; + Dtype datum_element = + static_cast(static_cast(data[data_index])); + top_data[top_index] = (datum_element - mean[data_index]) * scale; + } + } + } + } + } else { + // we will prefer to use data() first, and then try float_data() + if (data.size()) { + for (int j = 0; j < size; ++j) { + Dtype datum_element = + static_cast(static_cast(data[j])); + top_data[item_id * size + j] = (datum_element - mean[j]) * scale; + } + } else { + for (int j = 0; j < size; ++j) { + top_data[item_id * size + j] = + (datum.float_data(j) - mean[j]) * scale; + } + } + } + + if (layer->output_labels_) { + top_label[item_id] = datum.label(); + } + // go to the next iter + switch (layer->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + layer->iter_->Next(); + if (!layer->iter_->Valid()) { + // We have reached the end. Restart from the first. + DLOG(INFO) << "Restarting data prefetching from start."; + layer->iter_->SeekToFirst(); + } + break; + case DataParameter_DB_LMDB: + if (mdb_cursor_get(layer->mdb_cursor_, &layer->mdb_key_, + &layer->mdb_value_, MDB_NEXT) != MDB_SUCCESS) { + // We have reached the end. Restart from the first. 
+ DLOG(INFO) << "Restarting data prefetching from start."; + CHECK_EQ(mdb_cursor_get(layer->mdb_cursor_, &layer->mdb_key_, + &layer->mdb_value_, MDB_FIRST), MDB_SUCCESS); + } + break; + default: + LOG(FATAL) << "Unknown database backend"; + } + } + + return static_cast(NULL); +} + +template +DataLayer::~DataLayer() { + JoinPrefetchThread(); + // clean up the database resources + switch (this->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + break; // do nothing + case DataParameter_DB_LMDB: + mdb_cursor_close(mdb_cursor_); + mdb_close(mdb_env_, mdb_dbi_); + mdb_txn_abort(mdb_txn_); + mdb_env_close(mdb_env_); + break; + default: + LOG(FATAL) << "Unknown database backend"; + } +} + +template +void DataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + if (top->size() == 1) { + output_labels_ = false; + } else { + output_labels_ = true; + } + // Initialize DB + switch (this->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + { + leveldb::DB* db_temp; + leveldb::Options options; + options.create_if_missing = false; + options.max_open_files = 100; + LOG(INFO) << "Opening leveldb " << this->layer_param_.data_param().source(); + leveldb::Status status = leveldb::DB::Open( + options, this->layer_param_.data_param().source(), &db_temp); + CHECK(status.ok()) << "Failed to open leveldb " + << this->layer_param_.data_param().source() << std::endl + << status.ToString(); + db_.reset(db_temp); + iter_.reset(db_->NewIterator(leveldb::ReadOptions())); + iter_->SeekToFirst(); + } + break; + case DataParameter_DB_LMDB: + CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed"; + CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS); // 1TB + CHECK_EQ(mdb_env_open(mdb_env_, + this->layer_param_.data_param().source().c_str(), + MDB_RDONLY|MDB_NOTLS, 0664), MDB_SUCCESS) << "mdb_env_open failed"; + CHECK_EQ(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn_), MDB_SUCCESS) + << "mdb_txn_begin failed"; + CHECK_EQ(mdb_open(mdb_txn_, NULL, 0, &mdb_dbi_), MDB_SUCCESS) + << "mdb_open failed"; + CHECK_EQ(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_), MDB_SUCCESS) + << "mdb_cursor_open failed"; + LOG(INFO) << "Opening lmdb " << this->layer_param_.data_param().source(); + CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST), + MDB_SUCCESS) << "mdb_cursor_get failed"; + break; + default: + LOG(FATAL) << "Unknown database backend"; + } + + // Check if we would need to randomly skip a few data points + if (this->layer_param_.data_param().rand_skip()) { + unsigned int skip = caffe_rng_rand() % + this->layer_param_.data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + while (skip-- > 0) { + switch (this->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + iter_->Next(); + if (!iter_->Valid()) { + iter_->SeekToFirst(); + } + break; + case DataParameter_DB_LMDB: + if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT) + != MDB_SUCCESS) { + CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, + MDB_FIRST), MDB_SUCCESS); + } + break; + default: + LOG(FATAL) << "Unknown database backend"; + } + } + } + // Read a data point, and use it to initialize the top blob. 
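+  // The first record's channels/height/width determine the shapes of the top
+  // blobs and of the prefetch buffers allocated below.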
+ Datum datum; + switch (this->layer_param_.data_param().backend()) { + case DataParameter_DB_LEVELDB: + datum.ParseFromString(iter_->value().ToString()); + break; + case DataParameter_DB_LMDB: + datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size); + break; + default: + LOG(FATAL) << "Unknown database backend"; + } + + // image + int crop_size = this->layer_param_.data_param().crop_size(); + if (crop_size > 0) { + (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + prefetch_data_.reset(new Blob( + this->layer_param_.data_param().batch_size(), datum.channels(), + crop_size, crop_size)); + } else { + (*top)[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + prefetch_data_.reset(new Blob( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width())); + } + LOG(INFO) << "output data size: " << (*top)[0]->num() << "," + << (*top)[0]->channels() << "," << (*top)[0]->height() << "," + << (*top)[0]->width(); + // label + if (output_labels_) { + (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1); + prefetch_label_.reset( + new Blob(this->layer_param_.data_param().batch_size(), 1, 1, 1)); + } + // datum size + datum_channels_ = datum.channels(); + datum_height_ = datum.height(); + datum_width_ = datum.width(); + datum_size_ = datum.channels() * datum.height() * datum.width(); + CHECK_GT(datum_height_, crop_size); + CHECK_GT(datum_width_, crop_size); + // check if we want to have mean + if (this->layer_param_.data_param().has_mean_file()) { + const string& mean_file = this->layer_param_.data_param().mean_file(); + LOG(INFO) << "Loading mean file from" << mean_file; + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + data_mean_.FromProto(blob_proto); + CHECK_EQ(data_mean_.num(), 1); + CHECK_EQ(data_mean_.channels(), datum_channels_); + CHECK_EQ(data_mean_.height(), datum_height_); + CHECK_EQ(data_mean_.width(), datum_width_); + } else { + // Simply initialize an all-empty mean. + data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_); + } + // Now, start the prefetch thread. Before calling prefetch, we make two + // cpu_data calls so that the prefetch thread does not accidentally make + // simultaneous cudaMalloc calls when the main thread is running. In some + // GPUs this seems to cause failures if we do not so. + prefetch_data_->mutable_cpu_data(); + if (output_labels_) { + prefetch_label_->mutable_cpu_data(); + } + data_mean_.cpu_data(); + DLOG(INFO) << "Initializing prefetch"; + CreatePrefetchThread(); + DLOG(INFO) << "Prefetch initialized."; +} + +template +void DataLayer::CreatePrefetchThread() { + phase_ = Caffe::phase(); + const bool prefetch_needs_rand = (phase_ == Caffe::TRAIN) && + (this->layer_param_.data_param().mirror() || + this->layer_param_.data_param().crop_size()); + if (prefetch_needs_rand) { + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + } else { + prefetch_rng_.reset(); + } + // Create the thread. 
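+  // pthread_create returns 0 on success, so the CHECK below fails only if the
+  // prefetch thread could not be started.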
+ CHECK(!pthread_create(&thread_, NULL, DataLayerPrefetch, + static_cast(this))) << "Pthread execution failed."; +} + +template +void DataLayer::JoinPrefetchThread() { + CHECK(!pthread_join(thread_, NULL)) << "Pthread joining failed."; +} + +template +unsigned int DataLayer::PrefetchRand() { + CHECK(prefetch_rng_); + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + return (*prefetch_rng)(); +} + +template +Dtype DataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + // First, join the thread + JoinPrefetchThread(); + // Copy the data + caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(), + (*top)[0]->mutable_cpu_data()); + if (output_labels_) { + caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(), + (*top)[1]->mutable_cpu_data()); + } + // Start a new prefetch thread + CreatePrefetchThread(); + return Dtype(0.); +} + +INSTANTIATE_CLASS(DataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cu new file mode 100644 index 000000000..2ff9a292b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/data_layer.cu @@ -0,0 +1,39 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using std::string; + +namespace caffe { + +template +Dtype DataLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + // First, join the thread + JoinPrefetchThread(); + // Copy the data + CUDA_CHECK(cudaMemcpy((*top)[0]->mutable_gpu_data(), + prefetch_data_->cpu_data(), sizeof(Dtype) * prefetch_data_->count(), + cudaMemcpyHostToDevice)); + if (output_labels_) { + CUDA_CHECK(cudaMemcpy((*top)[1]->mutable_gpu_data(), + prefetch_label_->cpu_data(), sizeof(Dtype) * prefetch_label_->count(), + cudaMemcpyHostToDevice)); + } + // Start a new prefetch thread + CreatePrefetchThread(); + return Dtype(0.); +} + +INSTANTIATE_CLASS(DataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cpp new file mode 100644 index 000000000..f1e541129 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cpp @@ -0,0 +1,68 @@ +// Copyright 2014 BVLC and contributors. + +// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy. + +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DropoutLayer::SetUp(const vector*>& bottom, + vector*>* top) { + NeuronLayer::SetUp(bottom, top); + // Set up the cache for random number generation + rand_vec_.reset(new Blob(bottom[0]->num(), + bottom[0]->channels(), bottom[0]->height(), bottom[0]->width())); + threshold_ = this->layer_param_.dropout_param().dropout_ratio(); + DCHECK(threshold_ > 0.); + DCHECK(threshold_ < 1.); + scale_ = 1. / (1. 
- threshold_); + uint_thres_ = static_cast(UINT_MAX * threshold_); +} + +template +Dtype DropoutLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + unsigned int* mask = rand_vec_->mutable_cpu_data(); + const int count = bottom[0]->count(); + if (Caffe::phase() == Caffe::TRAIN) { + // Create random numbers + caffe_rng_bernoulli(count, 1. - threshold_, mask); + for (int i = 0; i < count; ++i) { + top_data[i] = bottom_data[i] * mask[i] * scale_; + } + } else { + caffe_copy(bottom[0]->count(), bottom_data, top_data); + } + return Dtype(0); +} + +template +void DropoutLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + CHECK(Caffe::phase() == Caffe::TRAIN); + if (propagate_down) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const unsigned int* mask = rand_vec_->cpu_data(); + const int count = (*bottom)[0]->count(); + for (int i = 0; i < count; ++i) { + bottom_diff[i] = top_diff[i] * mask[i] * scale_; + } + } +} + + +INSTANTIATE_CLASS(DropoutLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cu new file mode 100644 index 000000000..3c25d6a12 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dropout_layer.cu @@ -0,0 +1,78 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + + +template +__global__ void DropoutForward(const int n, const Dtype* in, + const unsigned int* mask, const unsigned int threshold, const float scale, + Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] * (mask[index] > threshold) * scale; + } +} + +template +Dtype DropoutLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + if (Caffe::phase() == Caffe::TRAIN) { + unsigned int* mask = + static_cast(rand_vec_->mutable_gpu_data()); + caffe_gpu_rng_uniform(count, mask); + // set thresholds + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutForward<<>>( + count, bottom_data, mask, uint_thres_, scale_, top_data); + CUDA_POST_KERNEL_CHECK; + } else { + caffe_gpu_copy(count, bottom_data, top_data); + } + return Dtype(0); +} + +template +__global__ void DropoutBackward(const int n, const Dtype* in_diff, + const unsigned int* mask, const unsigned int threshold, const float scale, + Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); + } +} + +template +void DropoutLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + CHECK(Caffe::phase() == Caffe::TRAIN); + if (propagate_down) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const unsigned int* mask = + static_cast(rand_vec_->gpu_data()); + const int count = (*bottom)[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + DropoutBackward<<>>( + count, top_diff, mask, uint_thres_, scale_, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + 
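+// This is "inverted" dropout: entries of the uniform random mask above
+// uint_thres_ are kept and scaled by scale_ = 1 / (1 - dropout_ratio), so the
+// expected activation at training time matches test time, where the input is
+// simply copied through unchanged.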
+INSTANTIATE_CLASS(DropoutLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/dummy_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dummy_data_layer.cpp new file mode 100644 index 000000000..58044f4c9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/dummy_data_layer.cpp @@ -0,0 +1,100 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void DummyDataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + const int num_top = top->size(); + const DummyDataParameter& param = this->layer_param_.dummy_data_param(); + const int num_data_filler = param.data_filler_size(); + CHECK(num_data_filler == 0 || num_data_filler == 1 || + num_data_filler == num_top) + << "Number of data fillers must be 0, 1 or equal to the number of tops: " + << num_top << "; you specified " << num_data_filler << " data fillers."; + CHECK(param.num_size() == 1 || param.num_size() == num_top) + << "Must specify either a single (1) 'num' or one for each top blob " + << "(" << num_top << "); you specified " << param.num_size() << "."; + CHECK(param.channels_size() == 1 || param.channels_size() == num_top) + << "Must specify either a single (1) 'channels' or one for each top blob " + << "(" << num_top << "); you specified " << param.channels_size() << "."; + CHECK(param.height_size() == 1 || param.height_size() == num_top) + << "Must specify either a single (1) 'height' or one for each top blob " + << "(" << num_top << "); you specified " << param.height_size() << "."; + CHECK(param.width_size() == 1 || param.width_size() == num_top) + << "Must specify either a single (1) 'width' or one for each top blob " + << "(" << num_top << "); you specified " << param.width_size() << "."; + // refill_[i] tells Forward i whether or not to actually refill top Blob i. + // If refill_[i] is false, Forward does nothing for Blob i. We use this to + // avoid wastefully refilling "constant" Blobs in every forward pass. + // We first fill refill_ in with the INVERSE of its final values. + // The first time we run Forward from the SetUp method, we'll fill only the + // Blobs for which refill_ is normally false. These Blobs will never be + // filled again. + refill_.clear(); + fillers_.clear(); + if (num_data_filler <= 1) { + FillerParameter filler_param; + if (num_data_filler == 0) { + filler_param.set_type("constant"); + filler_param.set_value(0); + } else { + filler_param.CopyFrom(param.data_filler(0)); + } + // Refill on each iteration iff not using a constant filler, + // but use the inverse of this rule for the first run. + refill_.resize(1); + refill_[0] = (strcmp(filler_param.type().c_str(), "constant") == 0); + fillers_.resize(1); + fillers_[0].reset(GetFiller(filler_param)); + } else { + refill_.resize(num_top); + fillers_.resize(num_top); + for (int i = 0; i < num_top; ++i) { + fillers_[i].reset(GetFiller(param.data_filler(i))); + // Refill on each iteration iff not using a constant filler, + // but use the inverse of this rule for the first run. + refill_[i] = + (strcmp(param.data_filler(i).type().c_str(), "constant") == 0); + } + } + for (int i = 0; i < num_top; ++i) { + const int num = (param.num_size() == 1) ? param.num(0) : param.num(i); + const int channels = + (param.channels_size() == 1) ? param.channels(0) : param.channels(i); + const int height = + (param.height_size() == 1) ? 
param.height(0) : param.height(i); + const int width = + (param.width_size() == 1) ? param.width(0) : param.width(i); + (*top)[i]->Reshape(num, channels, height, width); + } + // Run Forward once, with refill_ inverted, to fill the constant Blobs. + this->Forward(bottom, top); + // Invert the inverted refill_ values to refill the desired (non-constant) + // Blobs in every usual forward pass. + for (int i = 0; i < refill_.size(); ++i) { + refill_[i] = !refill_[i]; + } +} + +template +Dtype DummyDataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + for (int i = 0; i < top->size(); ++i) { + const int filler_id = (fillers_.size() > 1) ? i : 0; + if (refill_[filler_id]) { + fillers_[filler_id]->Fill((*top)[i]); + } + } + return Dtype(0.); +} + +INSTANTIATE_CLASS(DummyDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cpp new file mode 100644 index 000000000..5e5d760c9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cpp @@ -0,0 +1,100 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void EltwiseLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + CHECK(this->layer_param().eltwise_param().coeff_size() == 0 + || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << + "Eltwise Layer takes one coefficient per bottom blob."; + CHECK(!(this->layer_param().eltwise_param().operation() + == EltwiseParameter_EltwiseOp_PROD + && this->layer_param().eltwise_param().coeff_size())) << + "Eltwise layer only takes coefficients for summation."; + const int num = bottom[0]->num(); + const int channels = bottom[0]->channels(); + const int height = bottom[0]->height(); + const int width = bottom[0]->width(); + for (int i = 1; i < bottom.size(); ++i) { + CHECK_EQ(num, bottom[i]->num()); + CHECK_EQ(channels, bottom[i]->channels()); + CHECK_EQ(height, bottom[i]->height()); + CHECK_EQ(width, bottom[i]->width()); + } + (*top)[0]->Reshape(num, channels, height, width); + op_ = this->layer_param_.eltwise_param().operation(); + // Blob-wise coefficients for the elementwise operation. + coeffs_ = vector(bottom.size(), 1); + if (this->layer_param().eltwise_param().coeff_size()) { + for (int i = 0; i < bottom.size(); ++i) { + coeffs_[i] = this->layer_param().eltwise_param().coeff(i); + } + } +} + +template +Dtype EltwiseLayer::Forward_cpu( + const vector*>& bottom, vector*>* top) { + const int count = (*top)[0]->count(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data); + for (int i = 2; i < bottom.size(); ++i) { + caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_SUM: + caffe_set(count, Dtype(0), top_data); + // TODO(shelhamer) does BLAS optimize to sum for coeff = 1? 
+ for (int i = 0; i < bottom.size(); ++i) { + caffe_axpy(count, coeffs_[i], bottom[i]->cpu_data(), top_data); + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + return Dtype(0.); +} + +template +void EltwiseLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (propagate_down) { + const int count = top[0]->count(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + for (int i = 0; i < bottom->size(); ++i) { + const Dtype* bottom_data = (*bottom)[i]->cpu_data(); + Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_div(count, top_data, bottom_data, bottom_diff); + caffe_mul(count, bottom_diff, top_diff, bottom_diff); + break; + case EltwiseParameter_EltwiseOp_SUM: + if (coeffs_[i] == Dtype(1)) { + caffe_copy(count, top_diff, bottom_diff); + } else { + caffe_cpu_scale(count, coeffs_[i], top_diff, bottom_diff); + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + } + } +} + +INSTANTIATE_CLASS(EltwiseLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cu new file mode 100644 index 000000000..75827badb --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/eltwise_layer.cu @@ -0,0 +1,69 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype EltwiseLayer::Forward_gpu( + const vector*>& bottom, vector*>* top) { + const int count = (*top)[0]->count(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_gpu_mul(count, bottom[0]->gpu_data(), + bottom[1]->gpu_data(), top_data); + for (int i = 2; i < bottom.size(); ++i) { + caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); + } + break; + case EltwiseParameter_EltwiseOp_SUM: + caffe_gpu_set(count, Dtype(0.), top_data); + // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? 
+ for (int i = 0; i < bottom.size(); ++i) { + caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + return Dtype(0.); +} + +template +void EltwiseLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (propagate_down) { + const int count = top[0]->count(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + for (int i = 0; i < bottom->size(); ++i) { + const Dtype* bottom_data = (*bottom)[i]->gpu_data(); + Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); + switch (op_) { + case EltwiseParameter_EltwiseOp_PROD: + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); + break; + case EltwiseParameter_EltwiseOp_SUM: + if (coeffs_[i] == Dtype(1.)) { + caffe_gpu_copy(count, top_diff, bottom_diff); + } else { + caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); + } + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + } + } +} + +INSTANTIATE_CLASS(EltwiseLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/euclidean_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/euclidean_loss_layer.cpp new file mode 100644 index 000000000..a894d470c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/euclidean_loss_layer.cpp @@ -0,0 +1,54 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +void EuclideanLossLayer::FurtherSetUp( + const vector*>& bottom, vector*>* top) { + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->height(), bottom[1]->height()); + CHECK_EQ(bottom[0]->width(), bottom[1]->width()); + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); +} + +template +Dtype EuclideanLossLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), + bottom[1]->cpu_data(), + diff_.mutable_cpu_data()); + Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); + Dtype loss = dot / bottom[0]->num() / Dtype(2); + return loss; +} + +template +void EuclideanLossLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + caffe_cpu_axpby( + (*bottom)[0]->count(), // count + Dtype(1) / (*bottom)[0]->num(), // alpha + diff_.cpu_data(), // a + Dtype(0), // beta + (*bottom)[0]->mutable_cpu_diff()); // b +} + +INSTANTIATE_CLASS(EuclideanLossLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cpp new file mode 100644 index 000000000..95f4859f8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cpp @@ -0,0 +1,38 @@ +// Copyright 2014 BVLC and contributors. 
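
The EuclideanLossLayer just above computes loss = sum_i (a_i - b_i)^2 / (2 * num) in Forward_cpu and the gradient (a_i - b_i) / num in Backward_cpu (via caffe_cpu_axpby with alpha = 1/num). A small standalone numeric check of those two formulas, assuming nothing beyond what the layer code shows:

#include <cstdio>
#include <vector>

// Euclidean loss on a toy batch of num = 2 items with 2 values each.
int main() {
  const int num = 2;                               // batch size
  std::vector<double> a = {1.0, 2.0, 3.0, 4.0};    // predictions (bottom[0])
  std::vector<double> b = {1.5, 1.0, 2.0, 4.0};    // targets (bottom[1])
  std::vector<double> diff(a.size()), grad(a.size());
  double dot = 0.0;
  for (size_t i = 0; i < a.size(); ++i) {
    diff[i] = a[i] - b[i];                         // caffe_sub
    dot += diff[i] * diff[i];                      // caffe_cpu_dot
    grad[i] = diff[i] / num;                       // caffe_cpu_axpby, alpha = 1/num
  }
  std::printf("loss = %.4f, grad[0] = %.4f\n", dot / num / 2.0, grad[0]);
  return 0;
}

This prints loss = 0.5625 and grad[0] = -0.2500 for the toy values above.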
+ +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void FlattenLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + int channels_out = bottom[0]->channels() * bottom[0]->height() + * bottom[0]->width(); + (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1); + count_ = bottom[0]->num() * channels_out; + CHECK_EQ(count_, bottom[0]->count()); + CHECK_EQ(count_, (*top)[0]->count()); +} + +template +Dtype FlattenLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + (*top)[0]->ShareData(*bottom[0]); + return Dtype(0.); +} + +template +void FlattenLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + (*bottom)[0]->ShareDiff(*top[0]); +} + +INSTANTIATE_CLASS(FlattenLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cu new file mode 100644 index 000000000..157eeb1dc --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/flatten_layer.cu @@ -0,0 +1,26 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype FlattenLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + (*top)[0]->ShareData(*bottom[0]); + return Dtype(0.); +} + +template +void FlattenLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + (*bottom)[0]->ShareDiff(*top[0]); +} + +INSTANTIATE_CLASS(FlattenLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cpp new file mode 100644 index 000000000..d5c64f056 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cpp @@ -0,0 +1,123 @@ +// Copyright 2014 BVLC and contributors. +/* +TODO: +- load file in a separate thread ("prefetch") +- can be smarter about the memcpy call instead of doing it row-by-row + :: use util functions caffe_copy, and Blob->offset() + :: don't forget to update hdf5_daa_layer.cu accordingly +- add ability to shuffle filenames if flag is set +*/ +#include +#include +#include +#include // NOLINT(readability/streams) + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +HDF5DataLayer::~HDF5DataLayer() { } + +// Load data and label from HDF5 filename into the class property blobs. 
+template +void HDF5DataLayer::LoadHDF5FileData(const char* filename) { + LOG(INFO) << "Loading HDF5 file" << filename; + hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + if (file_id < 0) { + LOG(ERROR) << "Failed opening HDF5 file" << filename; + return; + } + + const int MIN_DATA_DIM = 2; + const int MAX_DATA_DIM = 4; + hdf5_load_nd_dataset( + file_id, "data", MIN_DATA_DIM, MAX_DATA_DIM, &data_blob_); + + const int MIN_LABEL_DIM = 1; + const int MAX_LABEL_DIM = 2; + hdf5_load_nd_dataset( + file_id, "label", MIN_LABEL_DIM, MAX_LABEL_DIM, &label_blob_); + + herr_t status = H5Fclose(file_id); + CHECK_EQ(data_blob_.num(), label_blob_.num()); + LOG(INFO) << "Successully loaded " << data_blob_.num() << " rows"; +} + +template +void HDF5DataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + // Read the source to parse the filenames. + const string& source = this->layer_param_.hdf5_data_param().source(); + LOG(INFO) << "Loading filename from " << source; + hdf_filenames_.clear(); + std::ifstream source_file(source.c_str()); + if (source_file.is_open()) { + std::string line; + while (source_file >> line) { + hdf_filenames_.push_back(line); + } + } + source_file.close(); + num_files_ = hdf_filenames_.size(); + current_file_ = 0; + LOG(INFO) << "Number of files: " << num_files_; + + // Load the first HDF5 file and initialize the line counter. + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + current_row_ = 0; + + // Reshape blobs. + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + (*top)[0]->Reshape(batch_size, data_blob_.channels(), + data_blob_.width(), data_blob_.height()); + (*top)[1]->Reshape(batch_size, label_blob_.channels(), + label_blob_.width(), label_blob_.height()); + LOG(INFO) << "output data size: " << (*top)[0]->num() << "," + << (*top)[0]->channels() << "," << (*top)[0]->height() << "," + << (*top)[0]->width(); +} + +template +Dtype HDF5DataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + const int data_count = (*top)[0]->count() / (*top)[0]->num(); + const int label_data_count = (*top)[1]->count() / (*top)[1]->num(); + + for (int i = 0; i < batch_size; ++i, ++current_row_) { + if (current_row_ == data_blob_.num()) { + if (num_files_ > 1) { + current_file_ += 1; + if (current_file_ == num_files_) { + current_file_ = 0; + LOG(INFO) << "looping around to first file"; + } + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + } + current_row_ = 0; + } + memcpy(&(*top)[0]->mutable_cpu_data()[i * data_count], + &data_blob_.cpu_data()[current_row_ * data_count], + sizeof(Dtype) * data_count); + memcpy(&(*top)[1]->mutable_cpu_data()[i * label_data_count], + &label_blob_.cpu_data()[current_row_ * label_data_count], + sizeof(Dtype) * label_data_count); + } + return Dtype(0.); +} + +// The backward operations are dummy - they do not carry any computation. +template +void HDF5DataLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { } + +INSTANTIATE_CLASS(HDF5DataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cu new file mode 100644 index 000000000..9c5bb5a81 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_data_layer.cu @@ -0,0 +1,64 @@ +// Copyright 2014 BVLC and contributors. 
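
HDF5DataLayer::SetUp above reads its "source" parameter as a text file listing one .h5 filename per line, and each file must contain a "data" dataset (2 to 4 dimensions, first dimension = rows) and a "label" dataset with the same number of rows. A hedged sketch of building such a file with the same HDF5 high-level API the layer already uses (file name and shapes are made up for illustration):

#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"

// Write a toy HDF5 file that HDF5DataLayer can consume: 10 rows of
// 3x8x8 data plus 10 scalar labels.
int main() {
  const hsize_t data_dims[4] = {10, 3, 8, 8};   // num, channels, height, width
  const hsize_t label_dims[2] = {10, 1};
  std::vector<float> data(10 * 3 * 8 * 8, 0.5f);
  std::vector<float> label(10, 1.0f);
  hid_t file_id = H5Fcreate("toy.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  H5LTmake_dataset_float(file_id, "data", 4, data_dims, data.data());
  H5LTmake_dataset_float(file_id, "label", 2, label_dims, label.data());
  H5Fclose(file_id);
  return 0;
}

The path "toy.h5" would then go into the source list file, one path per line, exactly as SetUp parses it.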
+/* +TODO: +- only load parts of the file, in accordance with a prototxt param "max_mem" +*/ + +#include +#include +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using std::string; + +namespace caffe { + +template +Dtype HDF5DataLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); + const int data_count = (*top)[0]->count() / (*top)[0]->num(); + const int label_data_count = (*top)[1]->count() / (*top)[1]->num(); + + for (int i = 0; i < batch_size; ++i, ++current_row_) { + if (current_row_ == data_blob_.num()) { + if (num_files_ > 1) { + current_file_ += 1; + + if (current_file_ == num_files_) { + current_file_ = 0; + LOG(INFO) << "looping around to first file"; + } + + LoadHDF5FileData(hdf_filenames_[current_file_].c_str()); + } + current_row_ = 0; + } + CUDA_CHECK(cudaMemcpy( + &(*top)[0]->mutable_gpu_data()[i * data_count], + &data_blob_.cpu_data()[current_row_ * data_count], + sizeof(Dtype) * data_count, + cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy( + &(*top)[1]->mutable_gpu_data()[i * label_data_count], + &label_blob_.cpu_data()[current_row_ * label_data_count], + sizeof(Dtype) * label_data_count, + cudaMemcpyHostToDevice)); + } + return Dtype(0.); +} + +template +void HDF5DataLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { +} + +INSTANTIATE_CLASS(HDF5DataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cpp new file mode 100644 index 000000000..0961b9b73 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cpp @@ -0,0 +1,76 @@ +// Copyright 2014 BVLC and contributors. 
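
Both the CPU and GPU forward passes of HDF5DataLayer walk current_row_ through the currently loaded file and, when the end is reached, advance current_file_ (wrapping back to the first file) and reload. A simplified standalone model of that cycling, ignoring the actual file loading:

#include <cstdio>

// Row/file cycling as in HDF5DataLayer::Forward_*: check for end-of-file
// before copying each row, wrap to the next file, reset the row counter.
int main() {
  const int num_files = 3;
  const int rows_per_file = 4;   // stand-in for data_blob_.num()
  const int batch_size = 5;
  int current_file = 0, current_row = 0;
  for (int step = 0; step < 3; ++step) {
    std::printf("batch %d:", step);
    for (int i = 0; i < batch_size; ++i, ++current_row) {
      if (current_row == rows_per_file) {
        current_file = (current_file + 1) % num_files;  // "looping around"
        current_row = 0;
      }
      std::printf(" (file %d, row %d)", current_file, current_row);
    }
    std::printf("\n");
  }
  return 0;
}

This makes it easy to see that batches can straddle file boundaries, which is why the real code copies row by row rather than with one bulk memcpy per batch.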
+ +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { +using std::vector; + +template +HDF5OutputLayer::HDF5OutputLayer(const LayerParameter& param) + : Layer(param), + file_name_(param.hdf5_output_param().file_name()) { + /* create a HDF5 file */ + file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(file_id_, 0) << "Failed to open HDF5 file" << file_name_; +} + +template +HDF5OutputLayer::~HDF5OutputLayer() { + herr_t status = H5Fclose(file_id_); + CHECK_GE(status, 0) << "Failed to close HDF5 file " << file_name_; +} + +template +void HDF5OutputLayer::SaveBlobs() { + // TODO: no limit on the number of blobs + LOG(INFO) << "Saving HDF5 file" << file_name_; + CHECK_EQ(data_blob_.num(), label_blob_.num()) << + "data blob and label blob must have the same batch size"; + hdf5_save_nd_dataset(file_id_, HDF5_DATA_DATASET_NAME, data_blob_); + hdf5_save_nd_dataset(file_id_, HDF5_DATA_LABEL_NAME, label_blob_); + LOG(INFO) << "Successfully saved " << data_blob_.num() << " rows"; +} + +template +Dtype HDF5OutputLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + memcpy(&data_blob_.mutable_cpu_data()[i * data_datum_dim], + &bottom[0]->cpu_data()[i * data_datum_dim], + sizeof(Dtype) * data_datum_dim); + memcpy(&label_blob_.mutable_cpu_data()[i * label_datum_dim], + &bottom[1]->cpu_data()[i * label_datum_dim], + sizeof(Dtype) * label_datum_dim); + } + SaveBlobs(); + return Dtype(0.); +} + +template +void HDF5OutputLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + return; +} + +INSTANTIATE_CLASS(HDF5OutputLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cu new file mode 100644 index 000000000..b99482522 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hdf5_output_layer.cu @@ -0,0 +1,49 @@ +// Copyright 2014 BVLC and contributors. 
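
HDF5OutputLayer above writes each forward batch into a single HDF5 file under the dataset names behind HDF5_DATA_DATASET_NAME and HDF5_DATA_LABEL_NAME. A hedged sketch of reading such a file back, assuming those constants resolve to "data" and "label" (see caffe/util/io.hpp for the actual values) and that the data blob was saved with its usual four dimensions:

#include <cstdio>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"

// Read back a file produced by HDF5OutputLayer ("output.h5" is illustrative).
int main() {
  hid_t file_id = H5Fopen("output.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
  hsize_t dims[4] = {0, 0, 0, 0};
  H5T_class_t type_class;
  size_t type_size;
  H5LTget_dataset_info(file_id, "data", dims, &type_class, &type_size);
  std::vector<float> data(dims[0] * dims[1] * dims[2] * dims[3]);
  H5LTread_dataset_float(file_id, "data", data.data());
  std::printf("read %llu rows of %llu x %llu x %llu\n",
              (unsigned long long)dims[0], (unsigned long long)dims[1],
              (unsigned long long)dims[2], (unsigned long long)dims[3]);
  H5Fclose(file_id);
  return 0;
}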
+ +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { +using std::vector; + +template +Dtype HDF5OutputLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + CUDA_CHECK(cudaMemcpy(&data_blob_.mutable_cpu_data()[i * data_datum_dim], + &bottom[0]->gpu_data()[i * data_datum_dim], + sizeof(Dtype) * data_datum_dim, cudaMemcpyDeviceToHost)); + CUDA_CHECK(cudaMemcpy(&label_blob_.mutable_cpu_data()[i * label_datum_dim], + &bottom[1]->gpu_data()[i * label_datum_dim], + sizeof(Dtype) * label_datum_dim, cudaMemcpyDeviceToHost)); + } + SaveBlobs(); + return Dtype(0.); +} + +template +void HDF5OutputLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + return; +} + +INSTANTIATE_CLASS(HDF5OutputLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/hinge_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hinge_loss_layer.cpp new file mode 100644 index 000000000..fd00d8c69 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/hinge_loss_layer.cpp @@ -0,0 +1,74 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +Dtype HingeLossLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int count = bottom[0]->count(); + int dim = count / num; + + caffe_copy(count, bottom_data, bottom_diff); + for (int i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; + } + for (int i = 0; i < num; ++i) { + for (int j = 0; j < dim; ++j) { + bottom_diff[i * dim + j] = max(Dtype(0), 1 + bottom_diff[i * dim + j]); + } + } + switch (this->layer_param_.hinge_loss_param().norm()) { + case HingeLossParameter_Norm_L1: + return caffe_cpu_asum(count, bottom_diff) / num; + case HingeLossParameter_Norm_L2: + return caffe_cpu_dot(count, bottom_diff, bottom_diff) / num; + default: + LOG(FATAL) << "Unknown Norm"; + } +} + +template +void HingeLossLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const Dtype* label = (*bottom)[1]->cpu_data(); + int num = (*bottom)[0]->num(); + int count = (*bottom)[0]->count(); + int dim = count / num; + + for (int i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] *= -1; + } + + switch (this->layer_param_.hinge_loss_param().norm()) { + case HingeLossParameter_Norm_L1: + caffe_cpu_sign(count, bottom_diff, bottom_diff); + caffe_scal(count, Dtype(1. 
/ num), bottom_diff); + break; + case HingeLossParameter_Norm_L2: + caffe_scal(count, Dtype(2. / num), bottom_diff); + break; + default: + LOG(FATAL) << "Unknown Norm"; + } +} + +INSTANTIATE_CLASS(HingeLossLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cpp new file mode 100644 index 000000000..f0c26c9a1 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cpp @@ -0,0 +1,52 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/common.hpp" + +namespace caffe { + +template +void Im2colLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + kernel_size_ = this->layer_param_.convolution_param().kernel_size(); + stride_ = this->layer_param_.convolution_param().stride(); + pad_ = this->layer_param_.convolution_param().pad(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + (*top)[0]->Reshape(bottom[0]->num(), channels_ * kernel_size_ * kernel_size_, + (height_ + 2 * pad_ - kernel_size_) / stride_ + 1, + (width_ + 2 * pad_ - kernel_size_) / stride_ + 1); +} + +template +Dtype Im2colLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, top_data + (*top)[0]->offset(n)); + } + return Dtype(0.); +} + +template +void Im2colLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + for (int n = 0; n < top[0]->num(); ++n) { + col2im_cpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_size_, pad_, stride_, bottom_diff + (*bottom)[0]->offset(n)); + } +} + +INSTANTIATE_CLASS(Im2colLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cu new file mode 100644 index 000000000..26bc1b979 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/im2col_layer.cu @@ -0,0 +1,38 @@ +// Copyright 2014 BVLC and contributors. 
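
Im2colLayer above wraps im2col_cpu: every (channel, kernel row, kernel col) triple becomes one row of the column buffer and every output location becomes one column, so a convolution reduces to a single matrix multiplication, and the output spatial size is (height + 2*pad - kernel_size)/stride + 1 as in SetUp. A standalone single-channel sketch of that transform (not the actual Caffe implementation, just the same layout):

#include <cstdio>
#include <vector>

// Minimal im2col for one channel with zero padding.
void im2col_single_channel(const std::vector<float>& img, int height, int width,
                           int kernel, int pad, int stride,
                           std::vector<float>* col) {
  const int h_out = (height + 2 * pad - kernel) / stride + 1;
  const int w_out = (width + 2 * pad - kernel) / stride + 1;
  col->assign(kernel * kernel * h_out * w_out, 0.f);
  for (int kh = 0; kh < kernel; ++kh)
    for (int kw = 0; kw < kernel; ++kw)
      for (int y = 0; y < h_out; ++y)
        for (int x = 0; x < w_out; ++x) {
          const int in_y = y * stride - pad + kh;
          const int in_x = x * stride - pad + kw;
          const float v = (in_y >= 0 && in_y < height && in_x >= 0 && in_x < width)
                              ? img[in_y * width + in_x]
                              : 0.f;  // zero padding
          (*col)[((kh * kernel + kw) * h_out + y) * w_out + x] = v;
        }
}

int main() {
  std::vector<float> img(5 * 5, 1.f);
  std::vector<float> col;
  im2col_single_channel(img, 5, 5, 3, 1, 2, &col);  // kernel 3, pad 1, stride 2
  std::printf("col buffer: %zu values (9 rows x 9 columns)\n", col.size());
  return 0;
}

col2im, used in the backward pass, is the transpose of this mapping: it scatters the column-buffer gradients back onto the padded image grid.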
+ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/common.hpp" + +namespace caffe { + +template +Dtype Im2colLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_size_, pad_, stride_, top_data + (*top)[0]->offset(n)); + } + return Dtype(0.); +} + +template +void Im2colLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + for (int n = 0; n < top[0]->num(); ++n) { + col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_size_, pad_, stride_, bottom_diff + (*bottom)[0]->offset(n)); + } +} + + +INSTANTIATE_CLASS(Im2colLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cpp new file mode 100644 index 000000000..bbf201626 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cpp @@ -0,0 +1,396 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include +#include +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) +#include + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/format.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +using std::iterator; +using std::string; +using std::pair; + +namespace caffe { + +template +void ProcessImageDatum( + const int channels, const int height, const int width, const int size, + const int crop_size, const bool mirror, const Dtype* mean, + const Dtype scale, const Datum datum, const int item_id, Dtype* top_data, + Dtype* top_label) { + const string& data = datum.data(); + if (crop_size > 0) { + CHECK_GT(height, crop_size); + CHECK_GT(width, crop_size); + CHECK(data.size()) << "Image cropping only support uint8 data"; + int h_off, w_off; + // We only do random crop when we do training. 
+ if (Caffe::phase() == Caffe::TRAIN) { + // NOLINT_NEXT_LINE(runtime/threadsafe_fn) + h_off = rand() % (height - crop_size); + // NOLINT_NEXT_LINE(runtime/threadsafe_fn) + w_off = rand() % (width - crop_size); + } else { + h_off = (height - crop_size) / 2; + w_off = (width - crop_size) / 2; + } + // NOLINT_NEXT_LINE(runtime/threadsafe_fn) + if (mirror && rand() % 2) { + // Copy mirrored version + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + top_data[((item_id * channels + c) * crop_size + h) * crop_size + + crop_size - 1 - w] = (static_cast((uint8_t) data[(c + * height + h + h_off) * width + w + w_off]) + - mean[(c * height + h + h_off) * width + w + w_off]) * scale; + } + } + } + } else { + // Normal copy + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + top_data[(( + item_id * channels + c) * crop_size + h) * crop_size + w] = + (static_cast((uint8_t) data[(c * height + h + h_off) + * width + w + w_off]) + - mean[(c * height + h + h_off) * width + w + w_off]) + * scale; + } + } + } + } + } else { + // Just copy the whole data + if (data.size()) { + for (int j = 0; j < size; ++j) { + top_data[item_id * size + j] = (static_cast((uint8_t) data[j]) + - mean[j]) * scale; + } + } else { + for (int j = 0; j < size; ++j) { + top_data[item_id * size + j] = (datum.float_data(j) - mean[j]) * scale; + } + } + } // if (crop_size > 0) { + + top_label[item_id] = datum.label(); +} + +template +void* ImageDataLayerPrefetch(void* layer_pointer) { + CHECK(layer_pointer); + ImageDataLayer* layer = + reinterpret_cast*>(layer_pointer); + CHECK(layer); + Datum datum; + CHECK(layer->prefetch_data_); + Dtype* top_data = layer->prefetch_data_->mutable_cpu_data(); + Dtype* top_label = layer->prefetch_label_->mutable_cpu_data(); + ImageDataParameter image_data_param = layer->layer_param_.image_data_param(); + const Dtype scale = image_data_param.scale(); + const int batch_size = image_data_param.batch_size(); + const int crop_size = image_data_param.crop_size(); + const bool mirror = image_data_param.mirror(); + const int new_height = image_data_param.new_height(); + const int new_width = image_data_param.new_width(); + const bool images_in_color = image_data_param.images_in_color(); + + if (mirror && crop_size == 0) { + LOG(FATAL) << "Current implementation requires mirror and crop_size to be " + << "set at the same time."; + } + // datum scales + const int channels = layer->datum_channels_; + const int height = layer->datum_height_; + const int width = layer->datum_width_; + const int size = layer->datum_size_; + const int lines_size = layer->lines_.size(); + const Dtype* mean = layer->data_mean_.cpu_data(); + for (int item_id = 0; item_id < batch_size; ++item_id) { + // get a blob + CHECK_GT(lines_size, layer->lines_id_); + if (!ReadImageToDatum(layer->lines_[layer->lines_id_].first, + layer->lines_[layer->lines_id_].second, + new_height, new_width, images_in_color, &datum)) { + continue; + } + ProcessImageDatum(channels, height, width, size, crop_size, mirror, mean, + scale, datum, item_id, top_data, top_label); + + top_label[item_id] = datum.label(); + // go to the next iter + layer->lines_id_++; + if (layer->lines_id_ >= lines_size) { + // We have reached the end. Restart from the first. 
+ DLOG(INFO) << "Restarting data prefetching from start."; + layer->lines_id_ = 0; + if (layer->layer_param_.image_data_param().shuffle()) { + std::random_shuffle(layer->lines_.begin(), layer->lines_.end()); + } + } + } + + return reinterpret_cast(NULL); +} + +template +ImageDataLayer::~ImageDataLayer() { + if (this->layer_param_.image_data_param().has_source()) { + // Finally, join the thread + CHECK(!pthread_join(thread_, NULL)) << "Pthread joining failed."; + } +} + +template +void ImageDataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + is_datum_set_up_ = false; + top_ = top; + Layer::SetUp(bottom, top); + const int new_height = this->layer_param_.image_data_param().new_height(); + const int new_width = this->layer_param_.image_data_param().new_height(); + const bool images_in_color = this->layer_param_.image_data_param().images_in_color(); + + CHECK((new_height == 0 && new_width == 0) || + (new_height > 0 && new_width > 0)) << "Current implementation requires " + "new_height and new_width to be set at the same time."; + // label + (*top)[1]->Reshape(this->layer_param_.image_data_param().batch_size(), + 1, 1, 1); + if (this->layer_param_.image_data_param().has_source()) { + // Read the file with filenames and labels + const string& source = this->layer_param_.image_data_param().source(); + LOG(INFO) << "Opening file " << source; + std::ifstream infile(source.c_str()); + string filename; + int label; + while (infile >> filename >> label) { + lines_.push_back(std::make_pair(filename, label)); + } + + if (this->layer_param_.image_data_param().shuffle()) { + // randomly shuffle data + LOG(INFO) << "Shuffling data"; + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + ShuffleImages(); + } + LOG(INFO) << "A total of " << lines_.size() << " images."; + + lines_id_ = 0; + // Check if we would need to randomly skip a few data points + if (this->layer_param_.image_data_param().rand_skip()) { + // NOLINT_NEXT_LINE(runtime/threadsafe_fn) + unsigned int skip = rand() % + this->layer_param_.image_data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + CHECK_GT(lines_.size(), skip) << "Not enought points to skip"; + lines_id_ = skip; + } + // Read a data point, and use it to initialize the top blob. 
+ Datum datum; + CHECK(ReadImageToDatum(lines_[lines_id_].first, lines_[lines_id_].second, + new_height, new_width, images_in_color, &datum)); + // image + const int crop_size = this->layer_param_.image_data_param().crop_size(); + SetUpWithDatum(crop_size, datum, top); + DLOG(INFO) << "Initializing prefetch"; + CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch, + reinterpret_cast(this))) << "Pthread execution failed."; + DLOG(INFO) << "Prefetch initialized."; + } // if (this->layer_param_.image_data_param().has_source()) { +} + +template +void ImageDataLayer::SetUpWithDatum( + const int crop_size, const Datum datum, vector*>* top) { + // datum size + datum_channels_ = datum.channels(); + CHECK_GT(datum_channels_, 0); + datum_height_ = datum.height(); + CHECK_GT(datum_height_, 0); + datum_width_ = datum.width(); + CHECK_GT(datum_width_, 0); + datum_size_ = datum.channels() * datum.height() * datum.width(); + + if (crop_size > 0) { + CHECK_GT(datum_height_, crop_size); + CHECK_GT(datum_width_, crop_size); + (*top)[0]->Reshape(this->layer_param_.image_data_param().batch_size(), + datum.channels(), crop_size, crop_size); + prefetch_data_.reset( + new Blob(this->layer_param_.image_data_param().batch_size(), + datum.channels(), crop_size, crop_size)); + } else { + (*top)[0]->Reshape( + this->layer_param_.image_data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + prefetch_data_.reset(new Blob( + this->layer_param_.image_data_param().batch_size(), datum.channels(), + datum.height(), datum.width())); + } + prefetch_label_.reset( + new Blob(this->layer_param_.image_data_param().batch_size(), + 1, 1, 1)); + + LOG(INFO) << "output data size: " << (*top)[0]->num() << "," + << (*top)[0]->channels() << "," << (*top)[0]->height() << "," + << (*top)[0]->width(); + + // check if we want to have mean + if (this->layer_param_.image_data_param().has_mean_file()) { + BlobProto blob_proto; + string mean_file = this->layer_param_.image_data_param().mean_file(); + LOG(INFO) << "Loading mean file from" << mean_file; + ReadProtoFromBinaryFile(mean_file.c_str(), &blob_proto); + data_mean_.FromProto(blob_proto); + CHECK_EQ(data_mean_.num(), 1); + CHECK_EQ(data_mean_.channels(), datum_channels_); + CHECK_EQ(data_mean_.height(), datum_height_); + CHECK_EQ(data_mean_.width(), datum_width_); + } else { + // Simply initialize an all-empty mean. + data_mean_.Reshape(1, datum_channels_, datum_height_, datum_width_); + } + // Now, start the prefetch thread. Before calling prefetch, we make two + // cpu_data calls so that the prefetch thread does not accidentally make + // simultaneous cudaMalloc calls when the main thread is running. In some + // GPUs this seems to cause failures if we do not so. + prefetch_data_->mutable_cpu_data(); + prefetch_label_->mutable_cpu_data(); + data_mean_.cpu_data(); + + is_datum_set_up_ = true; +} + +template +void ImageDataLayer::CreatePrefetchThread() { + phase_ = Caffe::phase(); + const bool prefetch_needs_rand = + this->layer_param_.image_data_param().shuffle() || + this->layer_param_.image_data_param().crop_size(); + if (prefetch_needs_rand) { + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + } else { + prefetch_rng_.reset(); + } + // Create the thread. 
+ CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch, + static_cast(this))) << "Pthread execution failed."; +} + +template +void ImageDataLayer::ShuffleImages() { + const int num_images = lines_.size(); + for (int i = 0; i < num_images; ++i) { + const int max_rand_index = num_images - i; + const int rand_index = PrefetchRand() % max_rand_index; + pair item = lines_[rand_index]; + lines_.erase(lines_.begin() + rand_index); + lines_.push_back(item); + } +} + +template +void ImageDataLayer::JoinPrefetchThread() { + CHECK(!pthread_join(thread_, NULL)) << "Pthread joining failed."; +} + +template +unsigned int ImageDataLayer::PrefetchRand() { + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + return (*prefetch_rng)(); +} + +template +void ImageDataLayer::AddImagesAndLabels(const vector& images, + const vector& labels) { + size_t num_images = images.size(); + CHECK_GT(num_images, 0) << "There is no image to add"; + int batch_size = this->layer_param_.image_data_param().batch_size(); + CHECK_LE(num_images, batch_size)<< + "The number of added images " << images.size() << + " must be no greater than the batch size " << batch_size; + CHECK_LE(num_images, labels.size()) << + "The number of images " << images.size() << + " must be no greater than the number of labels " << labels.size(); + + const int crop_size = this->layer_param_.image_data_param().crop_size(); + const bool mirror = this->layer_param_.image_data_param().mirror(); + if (mirror && crop_size == 0) { + LOG(FATAL)<< "Current implementation requires mirror and crop size to be " + << "set at the same time."; + } + const int new_height = this->layer_param_.image_data_param().new_height(); + const int new_width = this->layer_param_.image_data_param().new_height(); + + // TODO: create a thread-safe buffer with Intel TBB concurrent container + // and process the images in multiple threads with boost::thread + Datum datum; + int item_id = 0; + int data_index; + OpenCVImageToDatum(images[item_id], labels[item_id], new_height, new_width, + &datum); + if (!is_datum_set_up_) { + SetUpWithDatum(crop_size, datum, top_); + } + // datum scales + const int channels = this->datum_channels_; + const int height = this->datum_height_; + const int width = this->datum_width_; + const int size = this->datum_size_; + const Dtype* mean = this->data_mean_.cpu_data(); + const Dtype scale = this->layer_param_.image_data_param().scale(); + Dtype* top_data = this->prefetch_data_->mutable_cpu_data(); + Dtype* top_label = this->prefetch_label_->mutable_cpu_data(); + ProcessImageDatum(channels, height, width, size, crop_size, mirror, + mean, scale, datum, item_id, top_data, top_label); + int image_id; + for (item_id = 1; item_id < batch_size; ++item_id) { + image_id = item_id % num_images; + OpenCVImageToDatum(images[image_id], labels[image_id], new_height, + new_width, &datum); + ProcessImageDatum(channels, height, width, size, crop_size, mirror, + mean, scale, datum, item_id, top_data, top_label); + } +} + +template +Dtype ImageDataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + if (this->layer_param_.image_data_param().has_source()) { + // First, join the thread + JoinPrefetchThread(); + } + // Copy the data + caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(), + (*top)[0]->mutable_cpu_data()); + caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(), + (*top)[1]->mutable_cpu_data()); + // Start a new prefetch thread + if (this->layer_param_.image_data_param().has_source()) { + 
CreatePrefetchThread(); + } + return Dtype(0.); +} + +INSTANTIATE_CLASS(ImageDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cu new file mode 100644 index 000000000..dd5bdbc20 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/image_data_layer.cu @@ -0,0 +1,41 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include +#include +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using std::string; +using std::pair; + +namespace caffe { + +template +Dtype ImageDataLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + // First, join the thread + JoinPrefetchThread(); + // Copy the data + caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(), + (*top)[0]->mutable_gpu_data()); + caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(), + (*top)[1]->mutable_gpu_data()); + // Start a new prefetch thread + CreatePrefetchThread(); + return Dtype(0.); +} + +INSTANTIATE_CLASS(ImageDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/infogain_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/infogain_loss_layer.cpp new file mode 100644 index 000000000..ab6e67d73 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/infogain_loss_layer.cpp @@ -0,0 +1,76 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +void InfogainLossLayer::FurtherSetUp( + const vector*>& bottom, vector*>* top) { + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + + BlobProto blob_proto; + ReadProtoFromBinaryFile( + this->layer_param_.infogain_loss_param().source(), &blob_proto); + infogain_.FromProto(blob_proto); + CHECK_EQ(infogain_.num(), 1); + CHECK_EQ(infogain_.channels(), 1); + CHECK_EQ(infogain_.height(), infogain_.width()); +} + + +template +Dtype InfogainLossLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + const Dtype* infogain_mat = infogain_.cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + CHECK_EQ(infogain_.height(), dim); + Dtype loss = 0; + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + for (int j = 0; j < dim; ++j) { + Dtype prob = max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); + loss -= infogain_mat[label * dim + j] * log(prob); + } + } + return loss / num; +} + +template +void InfogainLossLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_label = (*bottom)[1]->cpu_data(); + const Dtype* infogain_mat = infogain_.cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + int num = (*bottom)[0]->num(); + int dim = (*bottom)[0]->count() / (*bottom)[0]->num(); + CHECK_EQ(infogain_.height(), dim); + for (int i = 0; i < num; ++i) { + 
int label = static_cast(bottom_label[i]); + for (int j = 0; j < dim; ++j) { + Dtype prob = max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); + bottom_diff[i * dim + j] = - infogain_mat[label * dim + j] / prob / num; + } + } +} + +INSTANTIATE_CLASS(InfogainLossLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cpp new file mode 100644 index 000000000..971254c9c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cpp @@ -0,0 +1,100 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void InnerProductLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + const int num_output = this->layer_param_.inner_product_param().num_output(); + bias_term_ = this->layer_param_.inner_product_param().bias_term(); + // Figure out the dimensions + M_ = bottom[0]->num(); + K_ = bottom[0]->count() / bottom[0]->num(); + N_ = num_output; + (*top)[0]->Reshape(bottom[0]->num(), num_output, 1, 1); + // Check if we need to set up the weights + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + if (bias_term_) { + this->blobs_.resize(2); + } else { + this->blobs_.resize(1); + } + // Intialize the weight + this->blobs_[0].reset(new Blob(1, 1, N_, K_)); + // fill the weights + shared_ptr > weight_filler(GetFiller( + this->layer_param_.inner_product_param().weight_filler())); + weight_filler->Fill(this->blobs_[0].get()); + // If necessary, intiialize and fill the bias term + if (bias_term_) { + this->blobs_[1].reset(new Blob(1, 1, 1, N_)); + shared_ptr > bias_filler(GetFiller( + this->layer_param_.inner_product_param().bias_filler())); + bias_filler->Fill(this->blobs_[1].get()); + } + } // parameter initialization + // Setting up the bias multiplier + if (bias_term_) { + bias_multiplier_.reset(new SyncedMemory(M_ * sizeof(Dtype))); + Dtype* bias_multiplier_data = + reinterpret_cast(bias_multiplier_->mutable_cpu_data()); + for (int i = 0; i < M_; ++i) { + bias_multiplier_data[i] = 1.; + } + } +} + +template +Dtype InnerProductLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const Dtype* weight = this->blobs_[0]->cpu_data(); + caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weight, (Dtype)0., top_data); + if (bias_term_) { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + reinterpret_cast(bias_multiplier_->cpu_data()), + this->blobs_[1]->cpu_data(), (Dtype)1., top_data); + } + return Dtype(0); +} + +template +void InnerProductLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + // Gradient with respect to weight + caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); + if (bias_term_) { + // Gradient with respect to bias + caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + reinterpret_cast(bias_multiplier_->cpu_data()), (Dtype)0., + 
this->blobs_[1]->mutable_cpu_diff()); + } + if (propagate_down) { + // Gradient with respect to bottom data + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., + (*bottom)[0]->mutable_cpu_diff()); + } +} + +INSTANTIATE_CLASS(InnerProductLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cu new file mode 100644 index 000000000..f139c23c3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/inner_product_layer.cu @@ -0,0 +1,57 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype InnerProductLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., + bottom_data, weight, (Dtype)0., top_data); + if (bias_term_) { + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., + reinterpret_cast(bias_multiplier_->gpu_data()), + this->blobs_[1]->gpu_data(), (Dtype)1., top_data); + } + return Dtype(0); +} + +template +void InnerProductLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + // Gradient with respect to weight + caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); + if (bias_term_) { + // Gradient with respect to bias + caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, + reinterpret_cast(bias_multiplier_->gpu_data()), + (Dtype)0., this->blobs_[1]->mutable_gpu_diff()); + } + if (propagate_down) { + // Gradient with respect to bottom data + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., + top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., + (*bottom)[0]->mutable_gpu_diff()); + } +} + +INSTANTIATE_CLASS(InnerProductLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/loss_layer.cpp new file mode 100644 index 000000000..14ea975ad --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/loss_layer.cpp @@ -0,0 +1,28 @@ +// Copyright 2014 BVLC and contributors. 
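
InnerProductLayer above stores its weights as an N x K blob and computes top (M x N) = bottom (M x K) * weight^T plus a bias broadcast over the M rows, which is what the caffe_cpu_gemm(CblasNoTrans, CblasTrans, ...) call expresses. A standalone loop version of the same product on tiny made-up values, for reference:

#include <cstdio>
#include <vector>

// Fully connected forward pass written out as explicit loops.
int main() {
  const int M = 2, K = 3, N = 2;                          // batch, inputs, outputs
  const std::vector<float> bottom = {1, 2, 3, 4, 5, 6};   // M x K
  const std::vector<float> weight = {1, 0, 1, 0, 1, 0};   // N x K, as in blobs_[0]
  const std::vector<float> bias = {0.5f, -0.5f};          // 1 x N, as in blobs_[1]
  std::vector<float> top(M * N, 0.f);
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n) {
      float acc = bias[n];                                // bias_multiplier_ row of ones
      for (int k = 0; k < K; ++k)
        acc += bottom[m * K + k] * weight[n * K + k];     // dot with weight row n
      top[m * N + n] = acc;
    }
  std::printf("top = [%g %g; %g %g]\n", top[0], top[1], top[2], top[3]);
  return 0;
}

For these values the output is [4.5 1.5; 10.5 4.5]; the backward pass simply runs the transposed multiplications to get the weight, bias, and bottom gradients.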
+ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +void LossLayer::SetUp( + const vector*>& bottom, vector*>* top) { + Layer::SetUp(bottom, top); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()) + << "The data and label should have the same number."; + FurtherSetUp(bottom, top); +} + +INSTANTIATE_CLASS(LossLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cpp new file mode 100644 index 000000000..071e71985 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cpp @@ -0,0 +1,260 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void LRNLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + size_ = this->layer_param_.lrn_param().local_size(); + pre_pad_ = (size_ - 1) / 2; + alpha_ = this->layer_param_.lrn_param().alpha(); + beta_ = this->layer_param_.lrn_param().beta(); + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + (*top)[0]->Reshape(num_, channels_, height_, width_); + scale_.Reshape(num_, channels_, height_, width_); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + { + // Set up split_layer_ to use inputs in the numerator and denominator. + split_top_vec_.clear(); + split_top_vec_.push_back(bottom[0]); + split_top_vec_.push_back(&square_input_); + LayerParameter split_param; + split_layer_.reset(new SplitLayer(split_param)); + split_layer_->SetUp(bottom, &split_top_vec_); + // Set up square_layer_ to square the inputs. + square_input_.Reshape(num_, channels_, height_, width_); + square_bottom_vec_.clear(); + square_top_vec_.clear(); + square_bottom_vec_.push_back(&square_input_); + square_top_vec_.push_back(&square_output_); + LayerParameter square_param; + square_param.mutable_power_param()->set_power(Dtype(2)); + square_layer_.reset(new PowerLayer(square_param)); + square_layer_->SetUp(square_bottom_vec_, &square_top_vec_); + CHECK_EQ(square_output_.num(), num_); + CHECK_EQ(square_output_.channels(), channels_); + CHECK_EQ(square_output_.height(), height_); + CHECK_EQ(square_output_.width(), width_); + // Set up pool_layer_ to sum over square neighborhoods of the input. + pool_top_vec_.clear(); + pool_top_vec_.push_back(&pool_output_); + LayerParameter pool_param; + pool_param.mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + pool_param.mutable_pooling_param()->set_pad(pre_pad_); + pool_param.mutable_pooling_param()->set_kernel_size(size_); + pool_layer_.reset(new PoolingLayer(pool_param)); + pool_layer_->SetUp(square_top_vec_, &pool_top_vec_); + CHECK_EQ(pool_output_.num(), num_); + CHECK_EQ(pool_output_.channels(), channels_); + CHECK_EQ(pool_output_.height(), height_); + CHECK_EQ(pool_output_.width(), width_); + // Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is + // the sum of a squared neighborhood (the output of pool_layer_). 
+ power_top_vec_.clear(); + power_top_vec_.push_back(&power_output_); + LayerParameter power_param; + power_param.mutable_power_param()->set_power(-beta_); + power_param.mutable_power_param()->set_scale(alpha_); + power_param.mutable_power_param()->set_shift(Dtype(1)); + power_layer_.reset(new PowerLayer(power_param)); + power_layer_->SetUp(pool_top_vec_, &power_top_vec_); + CHECK_EQ(power_output_.num(), num_); + CHECK_EQ(power_output_.channels(), channels_); + CHECK_EQ(power_output_.height(), height_); + CHECK_EQ(power_output_.width(), width_); + // Set up a product_layer_ to compute outputs by multiplying inputs by the + // inverse demoninator computed by the power layer. + product_bottom_vec_.clear(); + product_bottom_vec_.push_back(bottom[0]); + product_bottom_vec_.push_back(&power_output_); + LayerParameter product_param; + EltwiseParameter* eltwise_param = product_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + product_layer_.reset(new EltwiseLayer(product_param)); + product_layer_->SetUp(product_bottom_vec_, top); + CHECK_EQ((*top)[0]->num(), num_); + CHECK_EQ((*top)[0]->channels(), channels_); + CHECK_EQ((*top)[0]->height(), height_); + CHECK_EQ((*top)[0]->width(), width_); + } + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +Dtype LRNLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + return CrossChannelForward_cpu(bottom, top); + case LRNParameter_NormRegion_WITHIN_CHANNEL: + return WithinChannelForward(bottom, top); + default: + LOG(FATAL) << "Unknown normalization region."; + return Dtype(0); + } +} + +template +Dtype LRNLayer::CrossChannelForward_cpu( + const vector*>& bottom, vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* scale_data = scale_.mutable_cpu_data(); + // start with the constant value + for (int i = 0; i < scale_.count(); ++i) { + scale_data[i] = 1.; + } + Blob padded_square(1, channels_ + size_ - 1, height_, width_); + Dtype* padded_square_data = padded_square.mutable_cpu_data(); + memset(padded_square_data, 0, sizeof(Dtype) * padded_square.count()); + Dtype alpha_over_size = alpha_ / size_; + // go through the images + for (int n = 0; n < num_; ++n) { + // compute the padded square + caffe_sqr(channels_ * height_ * width_, + bottom_data + bottom[0]->offset(n), + padded_square_data + padded_square.offset(0, pre_pad_)); + // Create the first channel scale + for (int c = 0; c < size_; ++c) { + caffe_axpy(height_ * width_, alpha_over_size, + padded_square_data + padded_square.offset(0, c), + scale_data + scale_.offset(n, 0)); + } + for (int c = 1; c < channels_; ++c) { + // copy previous scale + caffe_copy(height_ * width_, + scale_data + scale_.offset(n, c - 1), + scale_data + scale_.offset(n, c)); + // add head + caffe_axpy(height_ * width_, alpha_over_size, + padded_square_data + padded_square.offset(0, c + size_ - 1), + scale_data + scale_.offset(n, c)); + // subtract tail + caffe_axpy(height_ * width_, -alpha_over_size, + padded_square_data + padded_square.offset(0, c - 1), + scale_data + scale_.offset(n, c)); + } + } + + // In the end, compute output + caffe_powx(scale_.count(), scale_data, -beta_, top_data); + caffe_mul(scale_.count(), top_data, bottom_data, top_data); + + return Dtype(0.); +} + +template +Dtype LRNLayer::WithinChannelForward( + const vector*>& bottom, 
vector*>* top) { + split_layer_->Forward(bottom, &split_top_vec_); + square_layer_->Forward(square_bottom_vec_, &square_top_vec_); + pool_layer_->Forward(square_top_vec_, &pool_top_vec_); + power_layer_->Forward(pool_top_vec_, &power_top_vec_); + product_layer_->Forward(product_bottom_vec_, top); + return Dtype(0.); +} + +template +void LRNLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_cpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void LRNLayer::CrossChannelBackward_cpu( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* scale_data = scale_.cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); + Blob accum_ratio(1, 1, height_, width_); + Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); + Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); + // We hack a little bit by using the diff() to store an additional result + Dtype* accum_ratio_times_bottom = accum_ratio.mutable_cpu_diff(); + memset(padded_ratio_data, 0, sizeof(Dtype) * padded_ratio.count()); + Dtype cache_ratio_value = 2. * alpha_ * beta_ / size_; + + caffe_powx(scale_.count(), scale_data, -beta_, bottom_diff); + caffe_mul(scale_.count(), top_diff, bottom_diff, bottom_diff); + + // go through individual data + int inverse_pre_pad = size_ - (size_ + 1) / 2; + for (int n = 0; n < num_; ++n) { + int block_offset = scale_.offset(n); + // first, compute diff_i * y_i / s_i + caffe_mul(channels_ * height_ * width_, + top_diff + block_offset, top_data + block_offset, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); + caffe_div(channels_ * height_ * width_, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad), + scale_data + block_offset, + padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); + // Now, compute the accumulated ratios and the bottom diff + memset(accum_ratio_data, 0, sizeof(Dtype) * accum_ratio.count()); + for (int c = 0; c < size_ - 1; ++c) { + caffe_axpy(height_ * width_, 1., + padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + } + for (int c = 0; c < channels_; ++c) { + caffe_axpy(height_ * width_, 1., + padded_ratio_data + padded_ratio.offset(0, c + size_ - 1), + accum_ratio_data); + // compute bottom diff + caffe_mul(height_ * width_, + bottom_data + top[0]->offset(n, c), + accum_ratio_data, accum_ratio_times_bottom); + caffe_axpy(height_ * width_, -cache_ratio_value, + accum_ratio_times_bottom, bottom_diff + top[0]->offset(n, c)); + caffe_axpy(height_ * width_, -1., + padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); + } + } +} + +template +void LRNLayer::WithinChannelBackward( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + product_layer_->Backward(top, true, &product_bottom_vec_); + power_layer_->Backward(power_top_vec_, true, &pool_top_vec_); + pool_layer_->Backward(pool_top_vec_, true, &square_top_vec_); + square_layer_->Backward(square_top_vec_, true, &square_bottom_vec_); + 
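// (Roughly speaking, the final split backward below sums the two gradient
// paths of the within-channel LRN graph -- the direct numerator path and the
// squared-input path -- back into the single bottom diff, since the split
// layer's tops share that bottom blob.)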
split_layer_->Backward(split_top_vec_, true, bottom); + } +} + +INSTANTIATE_CLASS(LRNLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cu new file mode 100644 index 000000000..b2097eb99 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/lrn_layer.cu @@ -0,0 +1,197 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void LRNFillScale(const int nthreads, const Dtype* in, + const int num, const int channels, const int height, + const int width, const int size, const Dtype alpha_over_size, + Dtype* scale) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + in += offset; + scale += offset; + int head = 0; + int pre_pad = (size - 1) / 2; + int post_pad = size - pre_pad - 1; + Dtype accum_scale = 0; + // fill the scale at [n, :, h, w] + // accumulate values + while (head < post_pad) { + accum_scale += in[head * step] * in[head * step]; + ++head; + } + // until we reach size, nothing needs to be subtracted + while (head < size) { + accum_scale += in[head * step] * in[head * step]; + scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size; + ++head; + } + // both add and subtract + while (head < channels) { + accum_scale += in[head * step] * in[head * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size; + ++head; + } + // subtract only + while (head < channels + post_pad) { + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size; + ++head; + } + } +} + + +template +Dtype LRNLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + return CrossChannelForward_gpu(bottom, top); + case LRNParameter_NormRegion_WITHIN_CHANNEL: + return WithinChannelForward(bottom, top); + default: + LOG(FATAL) << "Unknown normalization region."; + return Dtype(0); + } +} + +// TODO: check if it would be faster to just put it into the previous kernel. +template +__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, + const Dtype* scale, const Dtype negative_beta, Dtype* out) { + CUDA_KERNEL_LOOP(index, nthreads) { + out[index] = in[index] * pow(scale[index], negative_beta); + } +} + +template +Dtype LRNLayer::CrossChannelForward_gpu( + const vector*>& bottom, vector*>* top) { + // First, compute scale + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + // We will launch one kernel for each pixel location, and have the kernel + // go through all the channels. 
+ int n_threads = num_ * height_ * width_; + // NOLINT_NEXT_LINE(whitespace/operators) + LRNFillScale<<>>( + n_threads, bottom_data, num_, channels_, height_, width_, size_, + alpha_ / size_, scale_data); + CUDA_POST_KERNEL_CHECK; + n_threads = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeOutput<<>>( + n_threads, bottom_data, scale_data, -beta_, top_data); + CUDA_POST_KERNEL_CHECK; + return Dtype(0.); +} + + +template +void LRNLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_gpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + WithinChannelBackward(top, propagate_down, bottom); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, + const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, + const int num, const int channels, const int height, + const int width, const int size, const Dtype negative_beta, + const Dtype cache_ratio, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + bottom_data += offset; + top_data += offset; + scale += offset; + top_diff += offset; + bottom_diff += offset; + int head = 0; + int pre_pad = size - (size + 1) / 2; + int post_pad = size - pre_pad - 1; + Dtype accum_ratio = 0; + // accumulate values + while (head < post_pad) { + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; + ++head; + } + // until we reach size, nothing needs to be subtracted + while (head < size) { + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; + ++head; + } + // both add and subtract + while (head < channels) { + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; + ++head; + } + // subtract only + while (head < channels + post_pad) { + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; + ++head; + } + } +} + +template +void LRNLayer::CrossChannelBackward_gpu( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + int n_threads = num_ * height_ * width_; + // NOLINT_NEXT_LINE(whitespace/operators) + LRNComputeDiff<<>>( + n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), + scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, + size_, -beta_, Dtype(2. 
* alpha_ * beta_ / size_), + (*bottom)[0]->mutable_gpu_diff()); +} + + +INSTANTIATE_CLASS(LRNLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/memory_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/memory_data_layer.cpp new file mode 100644 index 000000000..15eedb317 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/memory_data_layer.cpp @@ -0,0 +1,50 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void MemoryDataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + batch_size_ = this->layer_param_.memory_data_param().batch_size(); + datum_channels_ = this->layer_param_.memory_data_param().channels(); + datum_height_ = this->layer_param_.memory_data_param().height(); + datum_width_ = this->layer_param_.memory_data_param().width(); + datum_size_ = datum_channels_ * datum_height_ * datum_width_; + CHECK_GT(batch_size_ * datum_size_, 0) << "batch_size, channels, height," + " and width must be specified and positive in memory_data_param"; + (*top)[0]->Reshape(batch_size_, datum_channels_, datum_height_, datum_width_); + (*top)[1]->Reshape(batch_size_, 1, 1, 1); + data_ = NULL; + labels_ = NULL; +} + +template +void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { + CHECK(data); + CHECK(labels); + CHECK_EQ(n % batch_size_, 0) << "n must be a multiple of batch size"; + data_ = data; + labels_ = labels; + n_ = n; + pos_ = 0; +} + +template +Dtype MemoryDataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; + (*top)[0]->set_cpu_data(data_ + pos_ * datum_size_); + (*top)[1]->set_cpu_data(labels_ + pos_); + pos_ = (pos_ + batch_size_) % n_; + return Dtype(0.); +} + +INSTANTIATE_CLASS(MemoryDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/multinomial_logistic_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/multinomial_logistic_loss_layer.cpp new file mode 100644 index 000000000..6486621d8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/multinomial_logistic_loss_layer.cpp @@ -0,0 +1,60 @@ +// Copyright 2014 BVLC and contributors. 
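// The multinomial logistic loss below assumes bottom[0] already holds
// probabilities (e.g. the output of a softmax layer) and bottom[1] holds
// integer labels. Per batch it computes
//   L = -(1/N) * sum_i log(max(p[i, label_i], kLOG_THRESHOLD))
// and the only nonzero gradient entry per example is
//   dL/dp[i, label_i] = -1 / (N * p[i, label_i]).
// Small worked example (illustrative): with N = 1 and p[label] = 0.25,
// the loss is -log(0.25) ~= 1.386 and the gradient at the label is -4.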
+ +#include +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/io.hpp" + +using std::max; + +namespace caffe { + +template +void MultinomialLogisticLossLayer::FurtherSetUp( + const vector*>& bottom, vector*>* top) { + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); +} + +template +Dtype MultinomialLogisticLossLayer::Forward_cpu( + const vector*>& bottom, vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + Dtype loss = 0; + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + Dtype prob = max(bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); + loss -= log(prob); + } + return loss / num; +} + +template +void MultinomialLogisticLossLayer::Backward_cpu( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_label = (*bottom)[1]->cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + int num = (*bottom)[0]->num(); + int dim = (*bottom)[0]->count() / (*bottom)[0]->num(); + memset(bottom_diff, 0, sizeof(Dtype) * (*bottom)[0]->count()); + for (int i = 0; i < num; ++i) { + int label = static_cast(bottom_label[i]); + Dtype prob = max(bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); + bottom_diff[i * dim + label] = -1. / prob / num; + } +} + +INSTANTIATE_CLASS(MultinomialLogisticLossLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/neuron_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/neuron_layer.cpp new file mode 100644 index 000000000..1b8fcecd4 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/neuron_layer.cpp @@ -0,0 +1,24 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void NeuronLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + // NeuronLayer allows in-place computations. If the computation is not + // in-place, we will need to initialize the top blob. + if ((*top)[0] != bottom[0]) { + (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + } +} + +INSTANTIATE_CLASS(NeuronLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cpp new file mode 100644 index 000000000..8f5f82d6f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cpp @@ -0,0 +1,269 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; +using std::min; + +namespace caffe { + +template +void PoolingLayer::SetUp(const vector*>& bottom, + vector*>* top) { + // Set the max number of top blobs before calling base Layer::SetUp. + // If doing MAX pooling, we can optionally output an extra top Blob + // for the mask. Otherwise, we only have one top Blob. 
+ if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) { + max_top_blobs_ = 2; + } else { + max_top_blobs_ = 1; + } + Layer::SetUp(bottom, top); + kernel_size_ = this->layer_param_.pooling_param().kernel_size(); + stride_ = this->layer_param_.pooling_param().stride(); + pad_ = this->layer_param_.pooling_param().pad(); + if (pad_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_, kernel_size_); + } + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + pooled_height_ = static_cast(ceil(static_cast( + height_ + 2 * pad_ - kernel_size_) / stride_)) + 1; + pooled_width_ = static_cast(ceil(static_cast( + width_ + 2 * pad_ - kernel_size_) / stride_)) + 1; + if (pad_) { + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. + if ((pooled_height_ - 1) * stride_ >= height_ + pad_) { + --pooled_height_; + } + if ((pooled_width_ - 1) * stride_ >= width_ + pad_) { + --pooled_width_; + } + CHECK_LT((pooled_height_ - 1) * stride_, height_ + pad_); + CHECK_LT((pooled_width_ - 1) * stride_, width_ + pad_); + } + (*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + if (top->size() > 1) { + (*top)[1]->ReshapeLike(*(*top)[0]); + } + // If max pooling, we will initialize the vector index part. + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX && top->size() == 1) { + max_idx_.reset(new Blob(bottom[0]->num(), channels_, + pooled_height_, pooled_width_)); + } + // If stochastic pooling, we will initialize the random index part. + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_STOCHASTIC) { + rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } +} + +// TODO(Yangqing): Is there a faster way to do pooling in the channel-first +// case? +template +Dtype PoolingLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int top_count = (*top)[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top->size() > 1; + int* mask; + Dtype* top_mask; + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more code. 
+ switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // Initialize + if (use_top_mask) { + top_mask = (*top)[1]->mutable_cpu_data(); + caffe_set(top_count, Dtype(-1), top_mask); + } else { + mask = max_idx_->mutable_cpu_data(); + caffe_set(top_count, -1, mask); + } + caffe_set(top_count, Dtype(-FLT_MAX), top_data); + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_ - pad_; + int wstart = pw * stride_ - pad_; + int hend = min(hstart + kernel_size_, height_); + int wend = min(wstart + kernel_size_, width_); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + const int pool_index = ph * pooled_width_ + pw; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + const int index = h * width_ + w; + if (bottom_data[index] > top_data[pool_index]) { + top_data[pool_index] = bottom_data[index]; + if (use_top_mask) { + top_mask[pool_index] = static_cast(index); + } else { + mask[pool_index] = index; + } + } + } + } + } + } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += (*top)[0]->offset(0, 1); + if (use_top_mask) { + top_mask += (*top)[0]->offset(0, 1); + } else { + mask += (*top)[0]->offset(0, 1); + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + for (int i = 0; i < top_count; ++i) { + top_data[i] = 0; + } + // The main loop + for (int n = 0; n < bottom[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_ - pad_; + int wstart = pw * stride_ - pad_; + int hend = min(hstart + kernel_size_, height_ + pad_); + int wend = min(wstart + kernel_size_, width_ + pad_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + top_data[ph * pooled_width_ + pw] += + bottom_data[h * width_ + w]; + } + } + top_data[ph * pooled_width_ + pw] /= pool_size; + } + } + // compute offset + bottom_data += bottom[0]->offset(0, 1); + top_data += (*top)[0]->offset(0, 1); + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + return Dtype(0.); +} + +template +void PoolingLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (!propagate_down) { + return; + } + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more codes. + caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. 
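// (Here the mask is read back rather than written: for MAX pooling each top
// gradient is routed to the single bottom element recorded as the argmax, so
// bottom_diff accumulates top_diff at those indices, while for AVE pooling
// each top gradient is divided by the window size and spread over the whole
// window.)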
+ const bool use_top_mask = top.size() > 1; + const int* mask; + const Dtype* top_mask; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // The main loop + if (use_top_mask) { + top_mask = top[1]->cpu_data(); + } else { + mask = max_idx_->cpu_data(); + } + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + const int index = ph * pooled_width_ + pw; + const int bottom_index = + use_top_mask ? top_mask[index] : mask[index]; + bottom_diff[bottom_index] += top_diff[index]; + } + } + bottom_diff += (*bottom)[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + if (use_top_mask) { + top_mask += top[0]->offset(0, 1); + } else { + mask += top[0]->offset(0, 1); + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + // The main loop + for (int n = 0; n < top[0]->num(); ++n) { + for (int c = 0; c < channels_; ++c) { + for (int ph = 0; ph < pooled_height_; ++ph) { + for (int pw = 0; pw < pooled_width_; ++pw) { + int hstart = ph * stride_ - pad_; + int wstart = pw * stride_ - pad_; + int hend = min(hstart + kernel_size_, height_ + pad_); + int wend = min(wstart + kernel_size_, width_ + pad_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height_); + wend = min(wend, width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + bottom_diff[h * width_ + w] += + top_diff[ph * pooled_width_ + pw] / pool_size; + } + } + } + } + // offset + bottom_diff += (*bottom)[0]->offset(0, 1); + top_diff += top[0]->offset(0, 1); + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } +} + + +INSTANTIATE_CLASS(PoolingLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cu new file mode 100644 index 000000000..abba6252c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/pooling_layer.cu @@ -0,0 +1,374 @@ +// Copyright 2014 BVLC and contributors. 
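// Output sizing shared by the CPU and GPU pooling code: the setup above
// computes
//   pooled_height_ = ceil((height_ + 2 * pad_ - kernel_size_) / stride_) + 1
// (and likewise for width), then, when padding is used, drops the last
// output if its window would start at or beyond the padded image boundary.
// Worked example (illustrative): height_ = 224, kernel_size_ = 3,
// stride_ = 2, pad_ = 0 gives ceil(221 / 2) + 1 = 111 + 1 = 112.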
+ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; +using std::min; + +namespace caffe { + +template +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, const int pad, Dtype* top_data, + int* mask, Dtype* top_mask) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride - pad; + int wstart = pw * stride - pad; + int hend = min(hstart + kernel_size, height); + int wend = min(wstart + kernel_size, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + Dtype maxval = -FLT_MAX; + int maxidx = -1; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (bottom_data[h * width + w] > maxval) { + maxidx = h * width + w; + maxval = bottom_data[maxidx]; + } + } + } + top_data[index] = maxval; + if (mask) { + mask[index] = maxidx; + } else { + top_mask[index] = maxidx; + } + } +} + +template +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, const int pad, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride - pad; + int wstart = pw * stride - pad; + int hend = min(hstart + kernel_size, height + pad); + int wend = min(wstart + kernel_size, width + pad); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + Dtype aveval = 0; + bottom_data += (n * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + aveval += bottom_data[h * width + w]; + } + } + top_data[index] = aveval / pool_size; + } +} + +template +__global__ void StoPoolForwardTrain(const int nthreads, + const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, Dtype* rand_idx, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride; + int hend = min(hstart + kernel_size, height); + int wstart = pw * stride; + int wend = min(wstart + kernel_size, width); + Dtype cumsum = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_data[h * width + w]; + } + } + float thres = rand_idx[index] * cumsum; + // Second pass: get value, and set index. 
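// (In other words, an element is selected with probability proportional to
// its activation: thres is a uniform draw in [0, sum), and the second pass
// walks the window until the running sum crosses it. The chosen linear index
// is written back into rand_idx so the backward pass can route the gradient
// to exactly that element.)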
+ cumsum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_data[h * width + w]; + if (cumsum >= thres) { + rand_idx[index] = ((n * channels + c) * height + h) * width + w; + top_data[index] = bottom_data[h * width + w]; + return; + } + } + } + } +} + + +template +__global__ void StoPoolForwardTest(const int nthreads, + const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride; + int hend = min(hstart + kernel_size, height); + int wstart = pw * stride; + int wend = min(wstart + kernel_size, width); + // We set cumsum to be 0 to avoid divide-by-zero problems + Dtype cumsum = FLT_MIN; + Dtype cumvalues = 0.; + bottom_data += (n * channels + c) * height * width; + // First pass: get sum + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + } + } + top_data[index] = cumvalues / cumsum; + } +} + + +template +Dtype PoolingLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + int count = (*top)[0]->count(); + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top->size() > 1; + int* mask = NULL; + Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = (*top)[1]->mutable_gpu_data(); + } else { + mask = max_idx_->mutable_gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_, + pad_, top_data, mask, top_mask); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolForward<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_, + pad_, top_data); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + if (Caffe::phase() == Caffe::TRAIN) { + // We need to create the random index as well. 
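// (rand_idx_ is first filled with uniform samples in [0, 1), one per output
// element; StoPoolForwardTrain then overwrites each entry with the sampled
// bottom index, which StoPoolBackward later uses for gradient routing. At
// test time StoPoolForwardTest instead outputs the expectation
// sum(x^2) / sum(x) over the window, so no sampling state is kept.)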
+ caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), + rand_idx_.mutable_gpu_data()); + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTrain<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_, + rand_idx_.mutable_gpu_data(), top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolForwardTest<<>>( + count, bottom_data, bottom[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_, + top_data); + } + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; + return Dtype(0.); +} + + +template +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_size, const int stride, + const int pad, Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = + (h + pad < kernel_size) ? 0 : (h + pad - kernel_size) / stride + 1; + int phend = min((h + pad) / stride + 1, pooled_height); + int pwstart = + (w + pad < kernel_size) ? 0 : (w + pad - kernel_size) / stride + 1; + int pwend = min((w + pad) / stride + 1, pooled_width); + Dtype gradient = 0; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; + if (mask) { + mask += offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } else { + top_mask += offset; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; + } + } + } + } + bottom_diff[index] = gradient; + } +} + +template +__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, const int pad, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + int w = index % width + pad; + int h = (index / width) % height + pad; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1; + int phend = min(h / stride + 1, pooled_height); + int pwstart = (w < kernel_size) ? 
0 : (w - kernel_size) / stride + 1; + int pwend = min(w / stride + 1, pooled_width); + Dtype gradient = 0; + top_diff += (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int hstart = ph * stride - pad; + int wstart = pw * stride - pad; + int hend = min(hstart + kernel_size, height + pad); + int wend = min(wstart + kernel_size, width + pad); + int pool_size = (hend - hstart) * (wend - wstart); + gradient += top_diff[ph * pooled_width + pw] / pool_size; + } + } + bottom_diff[index] = gradient; + } +} + + +template +__global__ void StoPoolBackward(const int nthreads, + const Dtype* rand_idx, const Dtype* top_diff, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_size, const int stride, Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + // find out the local index + // find out the local offset + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1; + int phend = min(h / stride + 1, pooled_height); + int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1; + int pwend = min(w / stride + 1, pooled_width); + Dtype gradient = 0; + rand_idx += (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); + } + } + bottom_diff[index] = gradient; + } +} + + +template +void PoolingLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (!propagate_down) { + return; + } + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + caffe_gpu_set(count, Dtype(0.), bottom_diff); + // We'll output the mask to top[1] if it's of size >1. 
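// (Unlike the CPU backward, which scatters each top gradient into the bottom
// buffer, the GPU kernels below are written gather-style: one thread per
// bottom element loops over the pooled outputs whose windows cover it and
// accumulates its own gradient, avoiding atomic writes.)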
+ const bool use_top_mask = top.size() > 1; + const int* mask = NULL; + const Dtype* top_mask = NULL; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + if (use_top_mask) { + top_mask = top[1]->gpu_data(); + } else { + mask = max_idx_->gpu_data(); + } + // NOLINT_NEXT_LINE(whitespace/operators) + MaxPoolBackward<<>>( + count, top_diff, mask, top_mask, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, + kernel_size_, stride_, pad_, bottom_diff); + break; + case PoolingParameter_PoolMethod_AVE: + // NOLINT_NEXT_LINE(whitespace/operators) + AvePoolBackward<<>>( + count, top_diff, top[0]->num(), channels_, + height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_, + pad_, bottom_diff); + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXT_LINE(whitespace/operators) + StoPoolBackward<<>>( + count, rand_idx_.gpu_data(), top_diff, + top[0]->num(), channels_, height_, width_, pooled_height_, + pooled_width_, kernel_size_, stride_, bottom_diff); + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + CUDA_POST_KERNEL_CHECK; +} + + +INSTANTIATE_CLASS(PoolingLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cpp new file mode 100644 index 000000000..85c84423a --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cpp @@ -0,0 +1,105 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +void PowerLayer::SetUp(const vector*>& bottom, + vector*>* top) { + NeuronLayer::SetUp(bottom, top); + power_ = this->layer_param_.power_param().power(); + scale_ = this->layer_param_.power_param().scale(); + shift_ = this->layer_param_.power_param().shift(); + diff_scale_ = power_ * scale_; +} + +// Compute y = (shift + scale * x)^power +template +Dtype PowerLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + // Special case where we can ignore the input: scale or power is 0. + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? 
Dtype(1) : pow(shift_, power_); + caffe_set(count, value, top_data); + return Dtype(0); + } + const Dtype* bottom_data = bottom[0]->cpu_data(); + caffe_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + caffe_scal(count, scale_, top_data); + } + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, shift_, top_data); + } + if (power_ != Dtype(1)) { + caffe_powx(count, top_data, power_, top_data); + } + return Dtype(0); +} + +template +void PowerLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const int count = (*bottom)[0]->count(); + const Dtype* top_diff = top[0]->cpu_diff(); + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + caffe_set(count, diff_scale_, bottom_diff); + } else { + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + caffe_cpu_axpby(count, diff_scale_ * scale_, bottom_data, + Dtype(0), bottom_diff); + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, diff_scale_ * shift_, bottom_diff); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->cpu_data(); + caffe_div(count, top_data, bottom_data, bottom_diff); + caffe_scal(count, power_, bottom_diff); + } else { + caffe_copy(count, bottom_data, bottom_diff); + if (scale_ != Dtype(1)) { + caffe_scal(count, scale_, bottom_diff); + } + if (shift_ != Dtype(0)) { + caffe_add_scalar(count, shift_, bottom_diff); + } + const Dtype* top_data = top[0]->cpu_data(); + caffe_div(count, top_data, bottom_diff, bottom_diff); + if (diff_scale_ != Dtype(1)) { + caffe_scal(count, diff_scale_, bottom_diff); + } + } + } + if (diff_scale_ != Dtype(0)) { + caffe_mul(count, top_diff, bottom_diff, bottom_diff); + } + } +} + +INSTANTIATE_CLASS(PowerLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cu new file mode 100644 index 000000000..9a25de72d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/power_layer.cu @@ -0,0 +1,92 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +Dtype PowerLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // Special case where we can ignore the input: scale or power is 0. + if (diff_scale_ == Dtype(0)) { + Dtype value = (power_ == 0) ? 
Dtype(1) : pow(shift_, power_); + caffe_gpu_set(count, value, top_data); + return Dtype(0); + } + const Dtype* bottom_data = bottom[0]->gpu_data(); + caffe_gpu_copy(count, bottom_data, top_data); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, top_data); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, top_data); + } + if (power_ != Dtype(1)) { + caffe_gpu_powx(count, top_data, power_, top_data); + } + return Dtype(0); +} + +template +void PowerLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + const Dtype* top_diff = top[0]->gpu_diff(); + if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { + caffe_gpu_set(count, diff_scale_, bottom_diff); + } else { + const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) + // = diff_scale * y / (shift + scale * x) + if (power_ == Dtype(2)) { + // Special case for y = (shift + scale * x)^2 + // -> dy/dx = 2 * scale * (shift + scale * x) + // = diff_scale * shift + diff_scale * scale * x + caffe_gpu_axpby(count, diff_scale_ * scale_, bottom_data, + Dtype(0), bottom_diff); + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, diff_scale_ * shift_, bottom_diff); + } + } else if (shift_ == Dtype(0)) { + // Special case for y = (scale * x)^power + // -> dy/dx = scale * power * (scale * x)^(power - 1) + // = scale * power * (scale * x)^power * (scale * x)^(-1) + // = power * y / x + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_data, bottom_diff); + caffe_gpu_scal(count, power_, bottom_diff); + } else { + caffe_gpu_copy(count, bottom_data, bottom_diff); + if (scale_ != Dtype(1)) { + caffe_gpu_scal(count, scale_, bottom_diff); + } + if (shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, shift_, bottom_diff); + } + const Dtype* top_data = top[0]->gpu_data(); + caffe_gpu_div(count, top_data, bottom_diff, bottom_diff); + if (diff_scale_ != Dtype(1)) { + caffe_gpu_scal(count, diff_scale_, bottom_diff); + } + } + } + caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); + } +} + +INSTANTIATE_CLASS(PowerLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cpp new file mode 100644 index 000000000..7a33e5562 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cpp @@ -0,0 +1,44 @@ +// Copyright 2014 BVLC and contributors. 
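// Gradient used by both power layer implementations above: for
// y = (shift + scale * x)^power the derivative is
//   dy/dx = power * scale * (shift + scale * x)^(power - 1)
//         = diff_scale_ * y / (shift + scale * x),
// with the special cases handled separately: power == 2 expands to
// diff_scale_ * (shift + scale * x), shift == 0 reduces to power * y / x,
// and diff_scale_ == 0 or power == 1 gives a constant derivative.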
+ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +template +Dtype ReLULayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = max(bottom_data[i], Dtype(0)); + } + return Dtype(0); +} + +template +void ReLULayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const int count = (*bottom)[0]->count(); + for (int i = 0; i < count; ++i) { + bottom_diff[i] = top_diff[i] * (bottom_data[i] > 0); + } + } +} + + +INSTANTIATE_CLASS(ReLULayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cu new file mode 100644 index 000000000..51e5ef26c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/relu_layer.cu @@ -0,0 +1,65 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +template +__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? in[index] : 0; + } +} + +template +Dtype ReLULayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + // << " count: " << count << " bottom_data: " + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data + // << " blocks: " << CAFFE_GET_BLOCKS(count) + // << " threads: " << CAFFE_CUDA_NUM_THREADS; + return Dtype(0); +} + +template +__global__ void ReLUBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + out_diff[index] = in_diff[index] * (in_data[index] > 0); + } +} + +template +void ReLULayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + ReLUBackward<<>>( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_CLASS(ReLULayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp new file mode 100644 index 000000000..a638684f3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -0,0 +1,65 @@ +// Copyright 2014 BVLC and contributors. 
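// The sigmoid cross-entropy loss below works on raw inputs x (logits) and
// targets t in [0, 1]. Mathematically the per-element loss is
//   -[ t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x)) ]
//   = -[ x * t - log(1 + exp(x)) ],
// and the expression in Forward_cpu,
//   x * (t - (x >= 0)) - log(1 + exp(x - 2 * x * (x >= 0))),
// is the same quantity rearranged so that the exp() argument is never
// positive, avoiding overflow for large |x|. The gradient is simply
// sigmoid(x) - t, scaled by 1/num, as computed in Backward_cpu.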
+ +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +void SigmoidCrossEntropyLossLayer::FurtherSetUp( + const vector*>& bottom, vector*>* top) { + CHECK_EQ(bottom[0]->count(), bottom[1]->count()) << + "SigmoidCrossEntropyLoss Layer inputs must have same count."; + sigmoid_bottom_vec_.clear(); + sigmoid_bottom_vec_.push_back(bottom[0]); + sigmoid_top_vec_.clear(); + sigmoid_top_vec_.push_back(sigmoid_output_.get()); + sigmoid_layer_->SetUp(sigmoid_bottom_vec_, &sigmoid_top_vec_); +} + +template +Dtype SigmoidCrossEntropyLossLayer::Forward_cpu( + const vector*>& bottom, vector*>* top) { + // The forward pass computes the sigmoid outputs. + sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + return loss / num; +} + +template +void SigmoidCrossEntropyLossLayer::Backward_cpu( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + // First, compute the diff + const int count = (*bottom)[0]->count(); + const int num = (*bottom)[0]->num(); + const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); + const Dtype* target = (*bottom)[1]->cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + caffe_sub(count, sigmoid_output_data, target, bottom_diff); + // Scale down gradient + caffe_scal(count, Dtype(1) / num, bottom_diff); +} + +INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu new file mode 100644 index 000000000..61004541f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -0,0 +1,54 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +Dtype SigmoidCrossEntropyLossLayer::Forward_gpu( + const vector*>& bottom, vector*>* top) { + // The forward pass computes the sigmoid outputs. 
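// (Note that only the internal sigmoid layer actually runs on the GPU in
// this Forward_gpu; the loss reduction below reads bottom[0]->cpu_data() and
// sums on the host, exactly as in the CPU implementation. Only Backward_gpu
// stays on the device.)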
+ sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + return loss / num; +} + +template +void SigmoidCrossEntropyLossLayer::Backward_gpu( + const vector*>& top, const bool propagate_down, + vector*>* bottom) { + // First, compute the diff + const int count = (*bottom)[0]->count(); + const int num = (*bottom)[0]->num(); + const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); + const Dtype* target = (*bottom)[1]->gpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + caffe_gpu_copy(count, sigmoid_output_data, bottom_diff); + caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); + // Scale down gradient + caffe_gpu_scal(count, Dtype(1) / num, bottom_diff); +} + +INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cpp new file mode 100644 index 000000000..88a7920fc --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cpp @@ -0,0 +1,48 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +inline Dtype sigmoid(Dtype x) { + return 1. / (1. + exp(-x)); +} + +template +Dtype SigmoidLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = sigmoid(bottom_data[i]); + } + return Dtype(0); +} + +template +void SigmoidLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const int count = (*bottom)[0]->count(); + for (int i = 0; i < count; ++i) { + const Dtype sigmoid_x = top_data[i]; + bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); + } + } +} + +INSTANTIATE_CLASS(SigmoidLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cu new file mode 100644 index 000000000..aa8568abb --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/sigmoid_layer.cu @@ -0,0 +1,67 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +template +__global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = 1. / (1. 
+ exp(-in[index])); + } +} + +template +Dtype SigmoidLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + // << " count: " << count << " bottom_data: " + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data + // << " blocks: " << CAFFE_GET_BLOCKS(count) + // << " threads: " << CAFFE_CUDA_NUM_THREADS; + return Dtype(0); +} + +template +__global__ void SigmoidBackward(const int n, const Dtype* in_diff, + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + const Dtype sigmoid_x = out_data[index]; + out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); + } +} + +template +void SigmoidLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidBackward<<>>( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_CLASS(SigmoidLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cpp new file mode 100644 index 000000000..dbe16da23 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cpp @@ -0,0 +1,88 @@ +// Copyright 2014 BVLC and contributors. +// +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +void SoftmaxLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + sum_multiplier_.Reshape(1, bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); + for (int i = 0; i < sum_multiplier_.count(); ++i) { + multiplier_data[i] = 1.; + } + scale_.Reshape(bottom[0]->num(), 1, 1, 1); +} + +template +Dtype SoftmaxLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* scale_data = scale_.mutable_cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + memcpy(top_data, bottom_data, sizeof(Dtype) * bottom[0]->count()); + // we need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + for (int i = 0; i < num; ++i) { + scale_data[i] = bottom_data[i*dim]; + for (int j = 0; j < dim; ++j) { + scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]); + } + } + // subtraction + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + scale_data, sum_multiplier_.cpu_data(), 1., top_data); + // Perform exponentiation + caffe_exp(num * dim, top_data, top_data); + // sum after exp + caffe_cpu_gemv(CblasNoTrans, num, dim, 1., top_data, + sum_multiplier_.cpu_data(), 0., scale_data); + // Do division + for (int i = 0; i < num; ++i) { + caffe_scal(dim, Dtype(1.) 
/ scale_data[i], top_data + i * dim); + } + return Dtype(0); +} + +template +void SoftmaxLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* scale_data = scale_.mutable_cpu_data(); + int num = top[0]->num(); + int dim = top[0]->count() / top[0]->num(); + memcpy(bottom_diff, top_diff, sizeof(Dtype) * top[0]->count()); + // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff + for (int i = 0; i < num; ++i) { + scale_data[i] = caffe_cpu_dot(dim, top_diff + i * dim, + top_data + i * dim); + } + // subtraction + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + scale_data, sum_multiplier_.cpu_data(), 1., bottom_diff); + // elementwise multiplication + caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +} + + +INSTANTIATE_CLASS(SoftmaxLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cu new file mode 100644 index 000000000..a264a819b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_layer.cu @@ -0,0 +1,112 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "thrust/device_vector.h" + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +__global__ void kernel_get_max(const int num, const int dim, + const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, num) { + Dtype maxval = -FLT_MAX; + for (int i = 0; i < dim; ++i) { + maxval = max(data[index * dim + i], maxval); + } + out[index] = maxval; + } +} + +template +__global__ void kernel_softmax_div(const int num, const int dim, + const Dtype* scale, Dtype* data) { + CUDA_KERNEL_LOOP(index, num * dim) { + int n = index / dim; + data[index] /= scale[n]; + } +} + +template +__global__ void kernel_exp(const int num, const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, num) { + out[index] = exp(data[index]); + } +} + +template +Dtype SoftmaxLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* scale_data = scale_.mutable_gpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + CUDA_CHECK(cudaMemcpy(top_data, bottom_data, + sizeof(Dtype) * bottom[0]->count(), cudaMemcpyDeviceToDevice)); + // we need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + // Compute max + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_get_max<<>>( + num, dim, bottom_data, scale_data); + // subtraction + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + scale_data, sum_multiplier_.gpu_data(), 1., top_data); + // Perform exponentiation + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_exp<<>>( + num * dim, top_data, top_data); + // sum after exp + caffe_gpu_gemv(CblasNoTrans, num, dim, 1., top_data, + sum_multiplier_.gpu_data(), 0., scale_data); + // Do division + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_softmax_div<<>>( + num, dim, scale_data, top_data); + return Dtype(0); +} + +// TODO(Yangqing): implement the GPU version of softmax. 
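+// Note on Backward_gpu below: with p = softmax(x), the chain rule gives
+// dL/dx_i = p_i * (dL/dp_i - sum_j dL/dp_j * p_j). The implementation copies
+// the top diff into the bottom diff, computes the per-example dot product
+// sum_j dL/dp_j * p_j into scale_, subtracts it from each element of the
+// copied diff via a rank-one gemm with sum_multiplier_, and finishes with an
+// elementwise multiplication by the top data (p).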
+template +void SoftmaxLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + int num = top[0]->num(); + int dim = top[0]->count() / top[0]->num(); + CUDA_CHECK(cudaMemcpy(bottom_diff, top_diff, + sizeof(Dtype) * top[0]->count(), cudaMemcpyDeviceToDevice)); + // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff + // cuda dot returns the result to cpu, so we temporarily change the pointer + // mode + CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(), + CUBLAS_POINTER_MODE_DEVICE)); + Dtype* scale_data = scale_.mutable_gpu_data(); + for (int i = 0; i < num; ++i) { + caffe_gpu_dot(dim, top_diff + i * dim, + top_data + i * dim, scale_data + i); + } + CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(), + CUBLAS_POINTER_MODE_HOST)); + // subtraction + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., + scale_.gpu_data(), sum_multiplier_.gpu_data(), 1., bottom_diff); + // elementwise multiplication + caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff); +} + +INSTANTIATE_CLASS(SoftmaxLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cpp new file mode 100644 index 000000000..ef6eebaba --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cpp @@ -0,0 +1,65 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +void SoftmaxWithLossLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + softmax_bottom_vec_.clear(); + softmax_bottom_vec_.push_back(bottom[0]); + softmax_top_vec_.push_back(&prob_); + softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_); +} + +template +Dtype SoftmaxWithLossLayer::Forward_cpu( + const vector*>& bottom, vector*>* top) { + // The forward pass computes the softmax prob values. 
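+  // The loss accumulated below is the average negative log likelihood of the
+  // correct class: loss = -(1/num) * sum_i log(p_i[label_i]). Each probability
+  // is clamped at FLT_MIN before taking the log so that a zero probability
+  // cannot produce an infinite loss.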
+ softmax_bottom_vec_[0] = bottom[0]; + softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_); + const Dtype* prob_data = prob_.cpu_data(); + const Dtype* label = bottom[1]->cpu_data(); + int num = prob_.num(); + int dim = prob_.count() / num; + Dtype loss = 0; + for (int i = 0; i < num; ++i) { + loss += -log(max(prob_data[i * dim + static_cast(label[i])], + Dtype(FLT_MIN))); + } + return loss / num; +} + +template +void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + // Compute the diff + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const Dtype* prob_data = prob_.cpu_data(); + memcpy(bottom_diff, prob_data, sizeof(Dtype) * prob_.count()); + const Dtype* label = (*bottom)[1]->cpu_data(); + int num = prob_.num(); + int dim = prob_.count() / num; + for (int i = 0; i < num; ++i) { + bottom_diff[i * dim + static_cast(label[i])] -= 1; + } + // Scale down gradient + caffe_scal(prob_.count(), Dtype(1) / num, bottom_diff); +} + + +INSTANTIATE_CLASS(SoftmaxWithLossLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cu new file mode 100644 index 000000000..24a3c384c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/softmax_loss_layer.cu @@ -0,0 +1,32 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; + +namespace caffe { + +template +Dtype SoftmaxWithLossLayer::Forward_gpu( + const vector*>& bottom, vector*>* top) { + // The forward pass computes the softmax prob values. + return Forward_cpu(bottom, top); +} + +template +void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + // TODO(Yangqing): implement the GPU version of softmax. + Backward_cpu(top, propagate_down, bottom); +} + +INSTANTIATE_CLASS(SoftmaxWithLossLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cpp new file mode 100644 index 000000000..2f99ca184 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cpp @@ -0,0 +1,55 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void SplitLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + count_ = bottom[0]->count(); + for (int i = 0; i < top->size(); ++i) { + // Allow the 0th top blob to be 'in-place', but no others. + if (i == 0 && (*top)[i] == bottom[0]) { + continue; + } else { + CHECK_NE((*top)[i], bottom[0]) << "Only 0th top blob may be in place."; + } + (*top)[i]->Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + CHECK_EQ(count_, (*top)[i]->count()); + } +} + +template +Dtype SplitLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + for (int i = 0; i < top->size(); ++i) { + (*top)[i]->ShareData(*bottom[0]); + } + return Dtype(0.); +} + +template +void SplitLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (propagate_down) { + (*bottom)[0]->ShareDiff(*top[0]); + // Add remaining top blob diffs. 
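+    // A split layer fans one bottom blob out to several top blobs, so the
+    // gradient w.r.t. the bottom is the sum of all the top diffs. The 0th top
+    // diff was shared into the bottom diff above (no copy); the remaining top
+    // diffs are accumulated into it with axpy below.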
+ Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + for (int i = 1; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->cpu_diff(); + caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff); + } + } +} + + +INSTANTIATE_CLASS(SplitLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cu new file mode 100644 index 000000000..e2269b8be --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/split_layer.cu @@ -0,0 +1,37 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +Dtype SplitLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + for (int i = 0; i < top->size(); ++i) { + (*top)[i]->ShareData(*bottom[0]); + } + return Dtype(0.); +} + +template +void SplitLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + if (propagate_down) { + (*bottom)[0]->ShareDiff(*top[0]); + // Add remaining top blob diffs. + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + for (int i = 1; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); + } + } +} + + +INSTANTIATE_CLASS(SplitLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cpp new file mode 100644 index 000000000..66f530f82 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cpp @@ -0,0 +1,47 @@ +// Copyright 2014 BVLC and contributors. +// TanH neuron activation function layer. +// Adapted from ReLU layer code written by Yangqing Jia + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +Dtype TanHLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype exp2x; + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + exp2x = exp(2 * bottom_data[i]); + top_data[i] = (exp2x - Dtype(1)) / (exp2x + Dtype(1)); + } + return Dtype(0); +} + +template +void TanHLayer::Backward_cpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* top_diff = top[0]->cpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const int count = (*bottom)[0]->count(); + Dtype exp2x; + Dtype tanhx; + for (int i = 0; i < count; ++i) { + tanhx = top_data[i]; + bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx); + } + } +} + +INSTANTIATE_CLASS(TanHLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cu new file mode 100644 index 000000000..aa822d84f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/tanh_layer.cu @@ -0,0 +1,62 @@ +// Copyright 2014 BVLC and contributors. +// TanH neuron activation function layer. 
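+// Forward computes tanh(x) as (exp(2x) - 1) / (exp(2x) + 1); Backward uses
+// d tanh(x)/dx = 1 - tanh(x)^2, reading tanh(x) back from the stored top data.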
+// Adapted from ReLU layer code written by Yangqing Jia + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + Dtype exp2x = exp(2 * in[index]); + out[index] = (exp2x - Dtype(1)) / (exp2x + Dtype(1)); + } +} + +template +Dtype TanHLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + TanHForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + return Dtype(0); +} + +template +__global__ void TanHBackward(const int n, const Dtype* in_diff, + const Dtype* out_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype tanhx = out_data[index]; + out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); + } +} + +template +void TanHLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, + vector*>* bottom) { + if (propagate_down) { + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const int count = (*bottom)[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + TanHBackward<<>>( + count, top_diff, top_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_CLASS(TanHLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cpp new file mode 100644 index 000000000..e6ed8a6b4 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cpp @@ -0,0 +1,32 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + + +namespace caffe { + +template +void ThresholdLayer::SetUp(const vector*>& bottom, + vector*>* top) { + NeuronLayer::SetUp(bottom, top); + threshold_ = this->layer_param_.threshold_param().threshold(); +} + +template +Dtype ThresholdLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = bottom[0]->count(); + for (int i = 0; i < count; ++i) { + top_data[i] = (bottom_data[i] > threshold_) ? Dtype(1) : Dtype(0); + } + return Dtype(0); +} + +INSTANTIATE_CLASS(ThresholdLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cu new file mode 100644 index 000000000..03eb3f32d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/threshold_layer.cu @@ -0,0 +1,39 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" + +using std::max; + +namespace caffe { + +template +__global__ void ThresholdForward(const int n, const Dtype threshold, + const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > threshold ? 
1 : 0; + } +} + +template +Dtype ThresholdLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + ThresholdForward<<>>( + count, threshold_, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; + + return Dtype(0); +} + + +INSTANTIATE_CLASS(ThresholdLayer); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cpp new file mode 100644 index 000000000..e08bed7d5 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cpp @@ -0,0 +1,455 @@ +// Copyright 2014 BVLC and contributors. +// +// Based on data_layer.cpp by Yangqing Jia. + +#include +#include + +#include +#include +#include +#include +#include // NOLINT(readability/streams) +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" +#include "caffe/vision_layers.hpp" + +using std::string; +using std::map; +using std::pair; + +// caffe.proto > LayerParameter > WindowDataParameter +// 'source' field specifies the window_file +// 'crop_size' indicates the desired warped size + +namespace caffe { + +template +void* WindowDataLayerPrefetch(void* layer_pointer) { + WindowDataLayer* layer = + reinterpret_cast*>(layer_pointer); + + // At each iteration, sample N windows where N*p are foreground (object) + // windows and N*(1-p) are background (non-object) windows + + Dtype* top_data = layer->prefetch_data_->mutable_cpu_data(); + Dtype* top_label = layer->prefetch_label_->mutable_cpu_data(); + const Dtype scale = layer->layer_param_.window_data_param().scale(); + const int batch_size = layer->layer_param_.window_data_param().batch_size(); + const int crop_size = layer->layer_param_.window_data_param().crop_size(); + const int context_pad = layer->layer_param_.window_data_param().context_pad(); + const bool mirror = layer->layer_param_.window_data_param().mirror(); + const float fg_fraction = + layer->layer_param_.window_data_param().fg_fraction(); + const Dtype* mean = layer->data_mean_.cpu_data(); + const int mean_off = (layer->data_mean_.width() - crop_size) / 2; + const int mean_width = layer->data_mean_.width(); + const int mean_height = layer->data_mean_.height(); + cv::Size cv_crop_size(crop_size, crop_size); + const string& crop_mode = layer->layer_param_.window_data_param().crop_mode(); + + bool use_square = (crop_mode == "square") ? true : false; + + // zero out batch + memset(top_data, 0, sizeof(Dtype)*layer->prefetch_data_->count()); + + const int num_fg = static_cast(static_cast(batch_size) + * fg_fraction); + const int num_samples[2] = { batch_size - num_fg, num_fg }; + + int item_id = 0; + // sample from bg set then fg set + for (int is_fg = 0; is_fg < 2; ++is_fg) { + for (int dummy = 0; dummy < num_samples[is_fg]; ++dummy) { + // sample a window + const unsigned int rand_index = layer->PrefetchRand(); + vector window = (is_fg) ? 
+ layer->fg_windows_[rand_index % layer->fg_windows_.size()] : + layer->bg_windows_[rand_index % layer->bg_windows_.size()]; + + bool do_mirror = false; + if (mirror && layer->PrefetchRand() % 2) { + do_mirror = true; + } + + // load the image containing the window + pair > image = + layer->image_database_[window[WindowDataLayer::IMAGE_INDEX]]; + + cv::Mat cv_img = cv::imread(image.first, CV_LOAD_IMAGE_COLOR); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << image.first; + return reinterpret_cast(NULL); + } + const int channels = cv_img.channels(); + + // crop window out of image and warp it + int x1 = window[WindowDataLayer::X1]; + int y1 = window[WindowDataLayer::Y1]; + int x2 = window[WindowDataLayer::X2]; + int y2 = window[WindowDataLayer::Y2]; + + int pad_w = 0; + int pad_h = 0; + if (context_pad > 0 || use_square) { + // scale factor by which to expand the original region + // such that after warping the expanded region to crop_size x crop_size + // there's exactly context_pad amount of padding on each side + Dtype context_scale = static_cast(crop_size) / + static_cast(crop_size - 2*context_pad); + + // compute the expanded region + Dtype half_height = static_cast(y2-y1+1)/2.0; + Dtype half_width = static_cast(x2-x1+1)/2.0; + Dtype center_x = static_cast(x1) + half_width; + Dtype center_y = static_cast(y1) + half_height; + if (use_square) { + if (half_height > half_width) { + half_width = half_height; + } else { + half_height = half_width; + } + } + x1 = static_cast(round(center_x - half_width*context_scale)); + x2 = static_cast(round(center_x + half_width*context_scale)); + y1 = static_cast(round(center_y - half_height*context_scale)); + y2 = static_cast(round(center_y + half_height*context_scale)); + + // the expanded region may go outside of the image + // so we compute the clipped (expanded) region and keep track of + // the extent beyond the image + int unclipped_height = y2-y1+1; + int unclipped_width = x2-x1+1; + int pad_x1 = std::max(0, -x1); + int pad_y1 = std::max(0, -y1); + int pad_x2 = std::max(0, x2 - cv_img.cols + 1); + int pad_y2 = std::max(0, y2 - cv_img.rows + 1); + // clip bounds + x1 = x1 + pad_x1; + x2 = x2 - pad_x2; + y1 = y1 + pad_y1; + y2 = y2 - pad_y2; + CHECK_GT(x1, -1); + CHECK_GT(y1, -1); + CHECK_LT(x2, cv_img.cols); + CHECK_LT(y2, cv_img.rows); + + int clipped_height = y2-y1+1; + int clipped_width = x2-x1+1; + + // scale factors that would be used to warp the unclipped + // expanded region + Dtype scale_x = + static_cast(crop_size)/static_cast(unclipped_width); + Dtype scale_y = + static_cast(crop_size)/static_cast(unclipped_height); + + // size to warp the clipped expanded region to + cv_crop_size.width = + static_cast(round(static_cast(clipped_width)*scale_x)); + cv_crop_size.height = + static_cast(round(static_cast(clipped_height)*scale_y)); + pad_x1 = static_cast(round(static_cast(pad_x1)*scale_x)); + pad_x2 = static_cast(round(static_cast(pad_x2)*scale_x)); + pad_y1 = static_cast(round(static_cast(pad_y1)*scale_y)); + pad_y2 = static_cast(round(static_cast(pad_y2)*scale_y)); + + pad_h = pad_y1; + // if we're mirroring, we mirror the padding too (to be pedantic) + if (do_mirror) { + pad_w = pad_x2; + } else { + pad_w = pad_x1; + } + + // ensure that the warped, clipped region plus the padding fits in the + // crop_size x crop_size image (it might not due to rounding) + if (pad_h + cv_crop_size.height > crop_size) { + cv_crop_size.height = crop_size - pad_h; + } + if (pad_w + cv_crop_size.width > crop_size) { + cv_crop_size.width 
= crop_size - pad_w; + } + } + + cv::Rect roi(x1, y1, x2-x1+1, y2-y1+1); + cv::Mat cv_cropped_img = cv_img(roi); + cv::resize(cv_cropped_img, cv_cropped_img, + cv_crop_size, 0, 0, cv::INTER_LINEAR); + + // horizontal flip at random + if (do_mirror) { + cv::flip(cv_cropped_img, cv_cropped_img, 1); + } + + // copy the warped window into top_data + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < cv_cropped_img.rows; ++h) { + for (int w = 0; w < cv_cropped_img.cols; ++w) { + Dtype pixel = + static_cast(cv_cropped_img.at(h, w)[c]); + + top_data[((item_id * channels + c) * crop_size + h + pad_h) + * crop_size + w + pad_w] + = (pixel + - mean[(c * mean_height + h + mean_off + pad_h) + * mean_width + w + mean_off + pad_w]) + * scale; + } + } + } + + // get window label + top_label[item_id] = window[WindowDataLayer::LABEL]; + + #if 0 + // useful debugging code for dumping transformed windows to disk + string file_id; + std::stringstream ss; + ss << layer->PrefetchRand(); + ss >> file_id; + std::ofstream inf((string("dump/") + file_id + + string("_info.txt")).c_str(), std::ofstream::out); + inf << image.first << std::endl + << window[WindowDataLayer::X1]+1 << std::endl + << window[WindowDataLayer::Y1]+1 << std::endl + << window[WindowDataLayer::X2]+1 << std::endl + << window[WindowDataLayer::Y2]+1 << std::endl + << do_mirror << std::endl + << top_label[item_id] << std::endl + << is_fg << std::endl; + inf.close(); + std::ofstream top_data_file((string("dump/") + file_id + + string("_data.txt")).c_str(), + std::ofstream::out | std::ofstream::binary); + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < crop_size; ++h) { + for (int w = 0; w < crop_size; ++w) { + top_data_file.write(reinterpret_cast( + &top_data[((item_id * channels + c) * crop_size + h) + * crop_size + w]), + sizeof(Dtype)); + } + } + } + top_data_file.close(); + #endif + + item_id++; + } + } + + return reinterpret_cast(NULL); +} + +template +WindowDataLayer::~WindowDataLayer() { + JoinPrefetchThread(); +} + +template +void WindowDataLayer::SetUp(const vector*>& bottom, + vector*>* top) { + Layer::SetUp(bottom, top); + // SetUp runs through the window_file and creates two structures + // that hold windows: one for foreground (object) windows and one + // for background (non-object) windows. We use an overlap threshold + // to decide which is which. 
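+  // Concretely, when the window file is parsed below: a window whose overlap
+  // with a ground-truth box is >= fg_threshold becomes a foreground sample; a
+  // window whose overlap is < bg_threshold becomes a background sample (its
+  // label and overlap are forced to 0); windows in between are discarded.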
+ + // window_file format + // repeated: + // # image_index + // img_path (abs path) + // channels + // height + // width + // num_windows + // class_index overlap x1 y1 x2 y2 + + LOG(INFO) << "Window data layer:" << std::endl + << " foreground (object) overlap threshold: " + << this->layer_param_.window_data_param().fg_threshold() << std::endl + << " background (non-object) overlap threshold: " + << this->layer_param_.window_data_param().bg_threshold() << std::endl + << " foreground sampling fraction: " + << this->layer_param_.window_data_param().fg_fraction(); + + std::ifstream infile(this->layer_param_.window_data_param().source().c_str()); + CHECK(infile.good()) << "Failed to open window file " + << this->layer_param_.window_data_param().source() << std::endl; + + map label_hist; + label_hist.insert(std::make_pair(0, 0)); + + string hashtag; + int image_index, channels; + while (infile >> hashtag >> image_index) { + CHECK_EQ(hashtag, "#"); + // read image path + string image_path; + infile >> image_path; + // read image dimensions + vector image_size(3); + infile >> image_size[0] >> image_size[1] >> image_size[2]; + channels = image_size[0]; + image_database_.push_back(std::make_pair(image_path, image_size)); + + // read each box + int num_windows; + infile >> num_windows; + const float fg_threshold = + this->layer_param_.window_data_param().fg_threshold(); + const float bg_threshold = + this->layer_param_.window_data_param().bg_threshold(); + for (int i = 0; i < num_windows; ++i) { + int label, x1, y1, x2, y2; + float overlap; + infile >> label >> overlap >> x1 >> y1 >> x2 >> y2; + + vector window(WindowDataLayer::NUM); + window[WindowDataLayer::IMAGE_INDEX] = image_index; + window[WindowDataLayer::LABEL] = label; + window[WindowDataLayer::OVERLAP] = overlap; + window[WindowDataLayer::X1] = x1; + window[WindowDataLayer::Y1] = y1; + window[WindowDataLayer::X2] = x2; + window[WindowDataLayer::Y2] = y2; + + // add window to foreground list or background list + if (overlap >= fg_threshold) { + int label = window[WindowDataLayer::LABEL]; + CHECK_GT(label, 0); + fg_windows_.push_back(window); + label_hist.insert(std::make_pair(label, 0)); + label_hist[label]++; + } else if (overlap < bg_threshold) { + // background window, force label and overlap to 0 + window[WindowDataLayer::LABEL] = 0; + window[WindowDataLayer::OVERLAP] = 0; + bg_windows_.push_back(window); + label_hist[0]++; + } + } + + if (image_index % 100 == 0) { + LOG(INFO) << "num: " << image_index << " " + << image_path << " " + << image_size[0] << " " + << image_size[1] << " " + << image_size[2] << " " + << "windows to process: " << num_windows; + } + } + + LOG(INFO) << "Number of images: " << image_index+1; + + for (map::iterator it = label_hist.begin(); + it != label_hist.end(); ++it) { + LOG(INFO) << "class " << it->first << " has " << label_hist[it->first] + << " samples"; + } + + LOG(INFO) << "Amount of context padding: " + << this->layer_param_.window_data_param().context_pad(); + + LOG(INFO) << "Crop mode: " + << this->layer_param_.window_data_param().crop_mode(); + + // image + int crop_size = this->layer_param_.window_data_param().crop_size(); + CHECK_GT(crop_size, 0); + const int batch_size = this->layer_param_.window_data_param().batch_size(); + (*top)[0]->Reshape(batch_size, channels, crop_size, crop_size); + prefetch_data_.reset( + new Blob(batch_size, channels, crop_size, crop_size)); + + LOG(INFO) << "output data size: " << (*top)[0]->num() << "," + << (*top)[0]->channels() << "," << (*top)[0]->height() << "," + << 
(*top)[0]->width(); + // label + (*top)[1]->Reshape(batch_size, 1, 1, 1); + prefetch_label_.reset( + new Blob(batch_size, 1, 1, 1)); + + // check if we want to have mean + if (this->layer_param_.window_data_param().has_mean_file()) { + const string& mean_file = + this->layer_param_.window_data_param().mean_file(); + LOG(INFO) << "Loading mean file from" << mean_file; + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file, &blob_proto); + data_mean_.FromProto(blob_proto); + CHECK_EQ(data_mean_.num(), 1); + CHECK_EQ(data_mean_.width(), data_mean_.height()); + CHECK_EQ(data_mean_.channels(), channels); + } else { + // Simply initialize an all-empty mean. + data_mean_.Reshape(1, channels, crop_size, crop_size); + } + // Now, start the prefetch thread. Before calling prefetch, we make two + // cpu_data calls so that the prefetch thread does not accidentally make + // simultaneous cudaMalloc calls when the main thread is running. In some + // GPUs this seems to cause failures if we do not so. + prefetch_data_->mutable_cpu_data(); + prefetch_label_->mutable_cpu_data(); + data_mean_.cpu_data(); + DLOG(INFO) << "Initializing prefetch"; + CreatePrefetchThread(); + DLOG(INFO) << "Prefetch initialized."; +} + +template +void WindowDataLayer::CreatePrefetchThread() { + const bool prefetch_needs_rand = + this->layer_param_.window_data_param().mirror() || + this->layer_param_.window_data_param().crop_size(); + if (prefetch_needs_rand) { + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + } else { + prefetch_rng_.reset(); + } + // Create the thread. + CHECK(!pthread_create(&thread_, NULL, WindowDataLayerPrefetch, + static_cast(this))) << "Pthread execution failed."; +} + +template +void WindowDataLayer::JoinPrefetchThread() { + CHECK(!pthread_join(thread_, NULL)) << "Pthread joining failed."; +} + +template +unsigned int WindowDataLayer::PrefetchRand() { + CHECK(prefetch_rng_); + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + return (*prefetch_rng)(); +} + +template +Dtype WindowDataLayer::Forward_cpu(const vector*>& bottom, + vector*>* top) { + // First, join the thread + JoinPrefetchThread(); + // Copy the data + caffe_copy(prefetch_data_->count(), prefetch_data_->cpu_data(), + (*top)[0]->mutable_cpu_data()); + caffe_copy(prefetch_label_->count(), prefetch_label_->cpu_data(), + (*top)[1]->mutable_cpu_data()); + // Start a new prefetch thread + CreatePrefetchThread(); + return Dtype(0.); +} + +INSTANTIATE_CLASS(WindowDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cu b/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cu new file mode 100644 index 000000000..bc49fef65 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/layers/window_data_layer.cu @@ -0,0 +1,44 @@ +// Copyright 2014 BVLC and contributors. +// +// Based on data_layer.cpp by Yangqing Jia. 
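+//
+// The GPU forward pass below performs no device-side computation of its own:
+// it joins the prefetch thread, copies the prefetched data and label buffers
+// from host memory into the top blobs with cudaMemcpy, and then kicks off the
+// next prefetch.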
+ +#include +#include + +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using std::string; +using std::map; +using std::pair; + +// caffe.proto > LayerParameter > WindowDataParameter +// 'source' field specifies the window_file +// 'crop_size' indicates the desired warped size + +namespace caffe { + +template +Dtype WindowDataLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + // First, join the thread + JoinPrefetchThread(); + // Copy the data + CUDA_CHECK(cudaMemcpy((*top)[0]->mutable_gpu_data(), + prefetch_data_->cpu_data(), sizeof(Dtype) * prefetch_data_->count(), + cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy((*top)[1]->mutable_gpu_data(), + prefetch_label_->cpu_data(), sizeof(Dtype) * prefetch_label_->count(), + cudaMemcpyHostToDevice)); + // Start a new prefetch thread + CreatePrefetchThread(); + return Dtype(0.); +} + +INSTANTIATE_CLASS(WindowDataLayer); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/net.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/net.cpp new file mode 100644 index 000000000..41ae75203 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/net.cpp @@ -0,0 +1,433 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/insert_splits.hpp" +#include "caffe/util/upgrade_proto.hpp" + +using std::pair; +using std::map; +using std::set; + +namespace caffe { + +template +Net::Net(const NetParameter& param) { + Init(param); +} + +template +Net::Net(const string& param_file) { + NetParameter param; + ReadNetParamsFromTextFileOrDie(param_file, ¶m); + Init(param); +} + +template +void Net::Init(const NetParameter& in_param) { + LOG(INFO) << "Initializing net from parameters: " << std::endl + << in_param.DebugString(); + // Create a copy of in_param with splits added where necessary. + NetParameter param; + InsertSplits(in_param, ¶m); + // Basically, build all the layers and set up its connections. 
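+  // In outline: the net inputs are registered first as blobs with a fake
+  // layer id of -1; then each layer is created in order, its bottom blobs are
+  // wired to previously created blobs (AppendBottom), its top blobs are
+  // created (AppendTop), the layer is set up, and whether it needs backward
+  // computation is decided from force_backward, its bottom blobs, and its
+  // blobs_lr settings.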
+ name_ = param.name(); + map blob_name_to_idx; + set available_blobs; + int num_layers = param.layers_size(); + CHECK_EQ(param.input_size() * 4, param.input_dim_size()) + << "Incorrect input blob dimension specifications."; + memory_used_ = 0; + // set the input blobs + for (int input_id = 0; input_id < param.input_size(); ++input_id) { + const int layer_id = -1; // inputs have fake layer ID -1 + AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); + } + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + // For each layer, set up their input and output + bottom_vecs_.resize(param.layers_size()); + top_vecs_.resize(param.layers_size()); + bottom_id_vecs_.resize(param.layers_size()); + top_id_vecs_.resize(param.layers_size()); + for (int layer_id = 0; layer_id < param.layers_size(); ++layer_id) { + bool in_place = false; + const LayerParameter& layer_param = param.layers(layer_id); + layers_.push_back(shared_ptr >(GetLayer(layer_param))); + layer_names_.push_back(layer_param.name()); + LOG(INFO) << "Creating Layer " << layer_param.name(); + bool need_backward = param.force_backward(); + // Figure out this layer's input and output + for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); + ++bottom_id) { + const int blob_id = AppendBottom(param, layer_id, bottom_id, + &available_blobs, &blob_name_to_idx); + // If a blob needs backward, this layer should provide it. + need_backward |= blob_need_backward_[blob_id]; + } + for (int top_id = 0; top_id < layer_param.top_size(); ++top_id) { + AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx); + } + // After this layer is connected, set it up. + // LOG(INFO) << "Setting up " << layer_names_[layer_id]; + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], &top_vecs_[layer_id]); + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->num() << " " + << top_vecs_[layer_id][top_id]->channels() << " " + << top_vecs_[layer_id][top_id]->height() << " " + << top_vecs_[layer_id][top_id]->width() << " (" + << top_vecs_[layer_id][top_id]->count() << ")"; + } + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + const int blobs_lr_size = layers_[layer_id]->layer_param().blobs_lr_size(); + CHECK(blobs_lr_size == layers_[layer_id]->blobs().size() || + blobs_lr_size == 0) << "Incorrect blobs lr size: should be either 0 " + << "or the same as the number of the layer's parameter blobs."; + if (blobs_lr_size) { + // Check if this layer needs backward operation itself + for (int param_id = 0; param_id < blobs_lr_size; ++param_id) { + need_backward |= + (layers_[layer_id]->layer_param().blobs_lr(param_id) > 0); + } + } else if (layers_[layer_id]->blobs().size()) { + // catch: if a layer param does not specify blobs_lr, we should assume the + // learning rate to be 1. Thus we will need to perform backward. + need_backward = true; + } + // Finally, set the backward flag + layer_need_backward_.push_back(need_backward); + if (need_backward) { + LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { + blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; + } + } else { + LOG(INFO) << layer_names_[layer_id] + << " does not need backward computation."; + } + } + // In the end, all remaining blobs are considered output blobs. 
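+  // A blob enters available_blobs when it is produced (AppendTop) and leaves
+  // it when some layer consumes it as a bottom (AppendBottom), so any blob
+  // still in the set at this point was never used as an input to a layer and
+  // is therefore an output of the network.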
+ for (set::iterator it = available_blobs.begin(); + it != available_blobs.end(); ++it) { + LOG(INFO) << "This network produces output " << *it; + net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); + net_output_blob_indices_.push_back(blob_name_to_idx[*it]); + } + for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { + blob_names_index_[blob_names_[blob_id]] = blob_id; + } + for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { + layer_names_index_[layer_names_[layer_id]] = layer_id; + } + GetLearningRateAndWeightDecay(); + LOG(INFO) << "Network initialization done."; + LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); +} + +// Helper for Net::Init: add a new input or top blob to the net. (Inputs have +// layer_id == -1, tops have layer_id >= 0.) +template +void Net::AppendTop(const NetParameter& param, const int layer_id, + const int top_id, set* available_blobs, + map* blob_name_to_idx) { + shared_ptr layer_param((layer_id >= 0) ? + (new LayerParameter(param.layers(layer_id))) : NULL); + const string& blob_name = layer_param ? + layer_param->top(top_id) : param.input(top_id); + // Check if we are doing in-place computation + if (layer_param && layer_param->bottom_size() > top_id && + blob_name == layer_param->bottom(top_id)) { + // In-place computation + LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); + top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); + } else if (blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { + // If we are not doing in-place computation but have duplicated blobs, + // raise an error. + LOG(FATAL) << "Duplicate blobs produced by multiple sources."; + } else { + // Normal output. + if (layer_param) { + LOG(INFO) << layer_param->name() << " -> " << blob_name; + } else { + LOG(INFO) << "Input " << top_id << " -> " << blob_name; + } + shared_ptr > blob_pointer(new Blob()); + const int blob_id = blobs_.size(); + blobs_.push_back(blob_pointer); + blob_names_.push_back(blob_name); + blob_need_backward_.push_back(param.force_backward()); + (*blob_name_to_idx)[blob_name] = blob_id; + if (layer_id == -1) { + // Set the (explicitly specified) dimensions of the input blob. + blob_pointer->Reshape(param.input_dim(top_id * 4), + param.input_dim(top_id * 4 + 1), + param.input_dim(top_id * 4 + 2), + param.input_dim(top_id * 4 + 3)); + net_input_blob_indices_.push_back(blob_id); + net_input_blobs_.push_back(blob_pointer.get()); + } else { + top_id_vecs_[layer_id].push_back(blob_id); + top_vecs_[layer_id].push_back(blob_pointer.get()); + } + memory_used_ += blob_pointer->count(); + } + available_blobs->insert(blob_name); +} + +// Helper for Net::Init: add a new bottom blob to the net. 
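+// The named blob must already be in available_blobs (i.e. produced by an
+// earlier layer or declared as a net input); it is wired into this layer's
+// bottom_vecs_ / bottom_id_vecs_ and erased from available_blobs so that it is
+// no longer considered a candidate network output.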
+template +int Net::AppendBottom(const NetParameter& param, + const int layer_id, const int bottom_id, + set* available_blobs, map* blob_name_to_idx) { + const LayerParameter& layer_param = param.layers(layer_id); + const string& blob_name = layer_param.bottom(bottom_id); + if (available_blobs->find(blob_name) == available_blobs->end()) { + LOG(FATAL) << "Unknown blob input " << blob_name + << " (at index " << bottom_id << ") to layer " << layer_id; + } + const int blob_id = (*blob_name_to_idx)[blob_name]; + LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); + bottom_id_vecs_[layer_id].push_back(blob_id); + available_blobs->erase(blob_name); + bool need_backward = param.force_backward() || blob_need_backward_[blob_id]; + return blob_id; +} + +template +void Net::GetLearningRateAndWeightDecay() { + LOG(INFO) << "Collecting Learning Rate and Weight Decay."; + for (int i = 0; i < layers_.size(); ++i) { + vector > >& layer_blobs = layers_[i]->blobs(); + for (int j = 0; j < layer_blobs.size(); ++j) { + params_.push_back(layer_blobs[j]); + } + // push the learning rate mutlipliers + if (layers_[i]->layer_param().blobs_lr_size()) { + CHECK_EQ(layers_[i]->layer_param().blobs_lr_size(), layer_blobs.size()); + for (int j = 0; j < layer_blobs.size(); ++j) { + float local_lr = layers_[i]->layer_param().blobs_lr(j); + CHECK_GE(local_lr, 0.); + params_lr_.push_back(local_lr); + } + } else { + for (int j = 0; j < layer_blobs.size(); ++j) { + params_lr_.push_back(1.); + } + } + // push the weight decay multipliers + if (layers_[i]->layer_param().weight_decay_size()) { + CHECK_EQ(layers_[i]->layer_param().weight_decay_size(), + layer_blobs.size()); + for (int j = 0; j < layer_blobs.size(); ++j) { + float local_decay = layers_[i]->layer_param().weight_decay(j); + CHECK_GE(local_decay, 0.); + params_weight_decay_.push_back(local_decay); + } + } else { + for (int j = 0; j < layer_blobs.size(); ++j) { + params_weight_decay_.push_back(1.); + } + } + } +} + +template +const vector*>& Net::ForwardPrefilled(Dtype* loss) { + if (loss != NULL) { + *loss = Dtype(0.); + } + for (int i = 0; i < layers_.size(); ++i) { + // LOG(ERROR) << "Forwarding " << layer_names_[i]; + Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]); + if (loss != NULL) { + *loss += layer_loss; + } + } + return net_output_blobs_; +} + +template +const vector*>& Net::Forward( + const vector*> & bottom, Dtype* loss) { + // Copy bottom to internal bottom + for (int i = 0; i < bottom.size(); ++i) { + net_input_blobs_[i]->CopyFrom(*bottom[i]); + } + return ForwardPrefilled(loss); +} + +template +string Net::Forward(const string& input_blob_protos, Dtype* loss) { + BlobProtoVector blob_proto_vec; + if (net_input_blobs_.size()) { + blob_proto_vec.ParseFromString(input_blob_protos); + CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) + << "Incorrect input size."; + for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { + net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); + } + } + ForwardPrefilled(loss); + blob_proto_vec.Clear(); + for (int i = 0; i < net_output_blobs_.size(); ++i) { + net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs()); + } + string output; + blob_proto_vec.SerializeToString(&output); + return output; +} + + +template +void Net::Backward() { + for (int i = layers_.size() - 1; i >= 0; --i) { + if (layer_need_backward_[i]) { + layers_[i]->Backward(top_vecs_[i], true, &bottom_vecs_[i]); + } + } +} + +template +void 
Net::ShareTrainedLayersWith(Net* other) { + int num_source_layers = other->layers().size(); + for (int i = 0; i < num_source_layers; ++i) { + Layer* source_layer = other->layers()[i].get(); + const string& source_layer_name = other->layer_names()[i]; + int target_layer_id = 0; + while (target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + Blob* source_blob = source_layer->blobs()[j].get(); + CHECK_EQ(target_blobs[j]->num(), source_blob->num()); + CHECK_EQ(target_blobs[j]->channels(), source_blob->channels()); + CHECK_EQ(target_blobs[j]->height(), source_blob->height()); + CHECK_EQ(target_blobs[j]->width(), source_blob->width()); + target_blobs[j]->ShareData(*source_blob); + } + } +} + +template +void Net::CopyTrainedLayersFrom(const NetParameter& param) { + int num_source_layers = param.layers_size(); + for (int i = 0; i < num_source_layers; ++i) { + const LayerParameter& source_layer = param.layers(i); + const string& source_layer_name = source_layer.name(); + int target_layer_id = 0; + while (target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + CHECK_EQ(target_blobs[j]->num(), source_layer.blobs(j).num()); + CHECK_EQ(target_blobs[j]->channels(), source_layer.blobs(j).channels()); + CHECK_EQ(target_blobs[j]->height(), source_layer.blobs(j).height()); + CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width()); + target_blobs[j]->FromProto(source_layer.blobs(j)); + } + } +} + +template +void Net::CopyTrainedLayersFrom(const string trained_filename) { + NetParameter param; + ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); + CopyTrainedLayersFrom(param); +} + +template +void Net::ToProto(NetParameter* param, bool write_diff) { + param->Clear(); + param->set_name(name_); + // Add bottom and top + for (int i = 0; i < net_input_blob_indices_.size(); ++i) { + param->add_input(blob_names_[net_input_blob_indices_[i]]); + } + DLOG(INFO) << "Serializing " << layers_.size() << " layers"; + for (int i = 0; i < layers_.size(); ++i) { + LayerParameter* layer_param = param->add_layers(); + for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { + layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); + } + for (int j = 0; j < top_id_vecs_[i].size(); ++j) { + layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); + } + layers_[i]->ToProto(layer_param, write_diff); + } +} + +template +void Net::Update() { + for (int i = 0; i < params_.size(); ++i) { + params_[i]->Update(); + } +} + +template +bool Net::has_blob(const string& blob_name) { + return blob_names_index_.find(blob_name) != 
blob_names_index_.end(); +} + +template +const shared_ptr > Net::blob_by_name( + const string& blob_name) { + shared_ptr > blob_ptr; + if (has_blob(blob_name)) { + blob_ptr = blobs_[blob_names_index_[blob_name]]; + } else { + blob_ptr.reset((Blob*)(NULL)); + LOG(WARNING) << "Unknown blob name " << blob_name; + } + return blob_ptr; +} + +template +bool Net::has_layer(const string& layer_name) { + return layer_names_index_.find(layer_name) != layer_names_index_.end(); +} + +template +const shared_ptr > Net::layer_by_name( + const string& layer_name) { + shared_ptr > layer_ptr; + if (has_layer(layer_name)) { + layer_ptr = layers_[layer_names_index_[layer_name]]; + } else { + layer_ptr.reset((Layer*)(NULL)); + LOG(WARNING) << "Unknown layer name " << layer_name; + } + return layer_ptr; +} + +INSTANTIATE_CLASS(Net); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe.proto b/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe.proto new file mode 100644 index 000000000..59ba5996d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe.proto @@ -0,0 +1,526 @@ +// Copyright 2014 BVLC and contributors. + +package caffe; + +message BlobProto { + optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; +} + +message FillerParameter { + // The filler type. + optional string type = 1 [default = 'constant']; + optional float value = 2 [default = 0]; // the value in constant filler + optional float min = 3 [default = 0]; // the min value in uniform filler + optional float max = 4 [default = 1]; // the max value in uniform filler + optional float mean = 5 [default = 0]; // the mean value in Gaussian filler + optional float std = 6 [default = 1]; // the std value in Gaussian filler + // The expected number of non-zero input weights for a given output in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [default = -1]; +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + repeated LayerParameter layers = 2; // a bunch of layers. + // The input blobs to the network. + repeated string input = 3; + // The dim of the input blobs. For each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. + repeated int32 input_dim = 4; + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [default = false]; +} + +message SolverParameter { + // {train,test}_net specify a path to a file containing the {train,test} net + // parameters; {train,test}_net_param specify the net parameters directly + // inside the SolverParameter. 
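+  //
+  // For orientation, a minimal solver definition in prototxt form might look
+  // like the following (an illustrative sketch; the file name and values are
+  // placeholders, not recommendations):
+  //   train_net: "train.prototxt"
+  //   base_lr: 0.01
+  //   lr_policy: "step"
+  //   gamma: 0.1
+  //   stepsize: 10000
+  //   momentum: 0.9
+  //   max_iter: 45000
+  //   snapshot: 5000
+  //   snapshot_prefix: "snapshot"
+  //   solver_mode: GPU
+  //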
+ // + // Only either train_net or train_net_param (not both) should be specified. + // You may specify 0 or more test_net and/or test_net_param. All + // nets specified using test_net_param will be tested first, followed by all + // nets specified using test_net (each processed in the order specified in + // the prototxt). + optional string train_net = 1; // The proto filename for the train net. + repeated string test_net = 2; // The proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Full params for the train net. + repeated NetParameter test_net_param = 22; // Full params for the test nets. + // The number of iterations for each testing phase. + repeated int32 test_iter = 3; + // The number of iterations between two testing phases. + optional int32 test_interval = 4 [default = 0]; + optional bool test_compute_loss = 19 [default = false]; + optional float base_lr = 5; // The base learning rate + // the number of iterations between displaying info. If display = 0, no info + // will be displayed. + optional int32 display = 6; + optional int32 max_iter = 7; // the maximum number of iterations + optional string lr_policy = 8; // The learning rate decay policy. + optional float gamma = 9; // The parameter to compute the learning rate. + optional float power = 10; // The parameter to compute the learning rate. + optional float momentum = 11; // The momentum value. + optional float weight_decay = 12; // The weight decay. + optional int32 stepsize = 13; // the stepsize for learning rate policy "step" + optional int32 snapshot = 14 [default = 0]; // The snapshot interval + optional string snapshot_prefix = 15; // The prefix for the snapshot. + // whether to snapshot diff in the results or not. Snapshotting diff will help + // debugging but the final protocol buffer size will be much larger. + optional bool snapshot_diff = 16 [default = false]; + // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. + enum SolverMode { + CPU = 0; + GPU = 1; + } + optional SolverMode solver_mode = 17 [default = GPU]; + // the device_id will that be used in GPU mode. Use device_id = 0 in default. + optional int32 device_id = 18 [default = 0]; + // If non-negative, the seed with which the Solver will initialize the Caffe + // random number generator -- useful for reproducible results. Otherwise, + // (and by default) initialize using a seed derived from the system clock. + optional int64 random_seed = 20 [default = -1]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers +} + +// NOTE +// Update the next available ID when you add a new LayerParameter field. +// +// LayerParameter next available ID: 27 (last added: dummy_data_param) +message LayerParameter { + repeated string bottom = 2; // the name of the bottom blobs + repeated string top = 3; // the name of the top blobs + optional string name = 4; // the layer name + + // NOTE + // Add new LayerTypes to the enum below in lexicographical order (other than + // starting with NONE), starting with the next available ID in the comment + // line above the enum. Update the next available ID when you add a new + // LayerType. 
+ // + // LayerType next available ID: 33 (last added: DUMMY_DATA) + enum LayerType { + // "NONE" layer type is 0th enum element so that we don't cause confusion + // by defaulting to an existent LayerType (instead, should usually error if + // the type is unspecified). + NONE = 0; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONVOLUTION = 4; + DATA = 5; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + TANH = 23; + WINDOW_DATA = 24; + THRESHOLD = 31; + } + optional LayerType type = 5; // the layer type from the enum above + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 6; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. + repeated float blobs_lr = 7; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 8; + + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; + optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; + optional WindowDataParameter window_data_param = 20; + optional ThresholdParameter threshold_param = 25; + optional HingeLossParameter hinge_loss_param = 29; + + // DEPRECATED: The layer parameters specified as a V0LayerParameter. + // This should never be used by any code except to upgrade to the new + // LayerParameter specification. 
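+  // In the current format a layer is written directly with the fields above.
+  // For illustration only (the name, blob names, and sizes are placeholders),
+  // a convolution layer in a net prototxt could read:
+  //   layers {
+  //     name: "conv1"
+  //     type: CONVOLUTION
+  //     bottom: "data"
+  //     top: "conv1"
+  //     blobs_lr: 1
+  //     blobs_lr: 2
+  //     convolution_param { num_output: 96 kernel_size: 11 stride: 4 }
+  //   }
+  // The deprecated field below exists only to support that upgrade path.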
+ optional V0LayerParameter layer = 1; +} + +// Message that stores parameters used by ArgMaxLayer +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; +} + +// Message that stores parameters used by ConcatLayer +message ConcatParameter { + // Concat Layer needs to specify the dimension along the concat will happen, + // the other dimensions must be the same for all the bottom blobs + // By default it will concatenate blobs along channels dimension + optional uint32 concat_dim = 1 [default = 1]; +} + +// Message that stores parameters used by ConvolutionLayer +message ConvolutionParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional uint32 pad = 3 [default = 0]; // The padding size + optional uint32 kernel_size = 4; // The kernel size + optional uint32 group = 5 [default = 1]; // The group size for group conv + optional uint32 stride = 6 [default = 1]; // The stride + optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias +} + +// Message that stores parameters used by DataLayer +message DataParameter { + enum DB { + LEVELDB = 0; + LMDB = 1; + } + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. + optional bool mirror = 6 [default = false]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the leveldb. + optional uint32 rand_skip = 7 [default = 0]; + optional DB backend = 8 [default = LEVELDB]; +} + +// Message that stores parameters used by DropoutLayer +message DropoutParameter { + optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio +} + +// Message that stores parameters used by DummyDataLayer. +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). +message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N + // num, N channels, N height, and N width fields, and must specify 0, 1 or N + // data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. 
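+  //
+  // For example (illustrative values only), two 10x3x28x28 tops filled by a
+  // single shared gaussian filler could be requested as:
+  //   dummy_data_param {
+  //     data_filler { type: "gaussian" std: 0.01 }
+  //     num: 10  num: 10
+  //     channels: 3  channels: 3
+  //     height: 28  height: 28
+  //     width: 28  width: 28
+  //   }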
+ repeated FillerParameter data_filler = 1; + repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +// Message that stores parameters used by EltwiseLayer +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + } + optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation +} + +// Message that stores parameters used by ThresholdLayer +message ThresholdParameter { + optional float threshold = 1 [default = 0]; // Strictly Positive values +} + +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; +} + +// Message that stores parameters used by HDF5OutputLayer +message HDF5OutputParameter { + optional string file_name = 1; +} + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [default = L1]; +} + +// Message that stores parameters used by ImageDataLayer +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. + optional bool mirror = 6 [default = false]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the leveldb. + optional uint32 rand_skip = 7 [default = 0]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [default = false]; + // It will also resize images if new_height or new_width are not zero. + optional uint32 new_height = 9 [default = 0]; + optional uint32 new_width = 10 [default = 0]; + + // By default assumes images are in color + optional bool images_in_color = 11 [default = true]; +} + +// Message that stores parameters InfogainLossLayer +message InfogainLossParameter { + // Specify the infogain matrix source. 
+ optional string source = 1; +} + +// Message that stores parameters used by InnerProductLayer +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias +} + +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [default = 5]; + optional float alpha = 2 [default = 1.]; + optional float beta = 3 [default = 0.75]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; +} + +// Message that stores parameters used by MemoryDataLayer +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +// Message that stores parameters used by PoolingLayer +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [default = MAX]; // The pooling method + optional uint32 kernel_size = 2; // The kernel size + optional uint32 stride = 3 [default = 1]; // The stride + // The padding size -- currently implemented only for average and max pooling. + // average pooling zero pads. max pooling -inf pads. + optional uint32 pad = 4 [default = 0]; +} + +// Message that stores parameters used by PowerLayer +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. + optional float power = 1 [default = 1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +// Message that stores parameters used by WindowDataLayer +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. + optional bool mirror = 6 [default = false]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [default = 0.5]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [default = 0.25]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [default = 0]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [default = "warp"]; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. 
+ optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [default = 0]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [default = 1]; // The group size for group conv + optional uint32 stride = 10 [default = 1]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [default = MAX]; // The pooling method + optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio + + optional uint32 local_size = 13 [default = 5]; // for local response norm + optional float alpha = 14 [default = 1.]; // for local response norm + optional float beta = 15 [default = 0.75]; // for local response norm + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [default = 1]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [default = 0]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [default = false]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. + repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the leveldb. + optional uint32 rand_skip = 53 [default = 0]; + + // Fields related to detection (det_*) + // foreground (object) overlap threshold + optional float det_fg_threshold = 54 [default = 0.5]; + // background (non-object) overlap threshold + optional float det_bg_threshold = 55 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float det_fg_fraction = 56 [default = 0.25]; + + // optional bool OBSOLETE_can_clobber = 57 [default = true]; + + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 det_context_pad = 58 [default = 0]; + + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string det_crop_mode = 59 [default = "warp"]; + + // For ReshapeLayer, one needs to specify the new dimensions. + optional int32 new_num = 60 [default = 0]; + optional int32 new_channels = 61 [default = 0]; + optional int32 new_height = 62 [default = 0]; + optional int32 new_width = 63 [default = 0]; + + // Whether or not ImageLayer should shuffle the list of files at every epoch. 
+ // It will also resize images if new_height or new_width are not zero. + optional bool shuffle_images = 64 [default = false]; + + // For ConcatLayer, one needs to specify the dimension for concatenation, and + // the other dimensions must be the same for all the bottom blobs. + // By default it will concatenate blobs along the channels dimension. + optional uint32 concat_dim = 65 [default = 1]; + + optional HDF5OutputParameter hdf5_output_param = 1001; +} diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe_pretty_print.proto b/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe_pretty_print.proto new file mode 100644 index 000000000..cfdce82c7 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/proto/caffe_pretty_print.proto @@ -0,0 +1,18 @@ +// Copyright 2014 BVLC and contributors. + +package caffe; + +import "caffe/proto/caffe.proto"; + +// A near-duplicate of NetParameter with fields re-numbered to beautify +// automatic prototext dumps. The main practical purpose is to print inputs +// before layers, because having inputs at the end looks weird. +// NetParameterPrettyPrint should never be used in code except for conversion +// FROM NetParameter and subsequent dumping to proto text file. +message NetParameterPrettyPrint { + optional string name = 1; + optional bool force_backward = 2 [default = false]; + repeated string input = 3; + repeated int32 input_dim = 4; + repeated LayerParameter layers = 5; +} diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/solver.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/solver.cpp new file mode 100644 index 000000000..769618175 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/solver.cpp @@ -0,0 +1,346 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include +#include +#include + +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +using std::max; +using std::min; + +namespace caffe { + +template +Solver::Solver(const SolverParameter& param) + : net_() { + Init(param); +} + +template +Solver::Solver(const string& param_file) + : net_() { + SolverParameter param; + ReadProtoFromTextFile(param_file, ¶m); + Init(param); +} + +template +void Solver::Init(const SolverParameter& param) { + LOG(INFO) << "Initializing solver from parameters: " << std::endl + << param.DebugString(); + param_ = param; + if (param_.solver_mode() == SolverParameter_SolverMode_GPU && + param_.has_device_id()) { + Caffe::SetDevice(param_.device_id()); + } + Caffe::set_mode(Caffe::Brew(param_.solver_mode())); + if (param_.random_seed() >= 0) { + Caffe::set_random_seed(param_.random_seed()); + } + // Scaffolding code + if (param_.has_train_net_param()) { + CHECK(!param_.has_train_net()) << "Either train_net_param or train_net may " + << "be specified, but not both."; + LOG(INFO) << "Creating training net specified in SolverParameter."; + net_.reset(new Net(param_.train_net_param())); + } else { + CHECK(param_.has_train_net()) + << "Neither train_net nor train_net_param were specified."; + LOG(INFO) << "Creating training net from file: " << param_.train_net(); + net_.reset(new Net(param_.train_net())); + } + const int num_test_net_params = param_.test_net_param_size(); + const int num_test_net_files = param_.test_net_size(); + const int num_test_nets = num_test_net_params + num_test_net_files; + if (num_test_nets) { + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each 
test network."; + CHECK_GT(param_.test_interval(), 0); + } + test_nets_.resize(num_test_nets); + for (int i = 0; i < num_test_net_params; ++i) { + LOG(INFO) << "Creating testing net (#" << i + << ") specified in SolverParameter."; + test_nets_[i].reset(new Net(param_.test_net_param(i))); + } + for (int i = 0, test_net_id = num_test_net_params; + i < num_test_net_files; ++i, ++test_net_id) { + LOG(INFO) << "Creating testing net (#" << test_net_id + << ") from file: " << param.test_net(i); + test_nets_[test_net_id].reset(new Net(param_.test_net(i))); + } + LOG(INFO) << "Solver scaffolding done."; +} + +template +void Solver::Solve(const char* resume_file) { + Caffe::set_phase(Caffe::TRAIN); + LOG(INFO) << "Solving " << net_->name(); + PreSolve(); + + iter_ = 0; + if (resume_file) { + LOG(INFO) << "Restoring previous solver status from " << resume_file; + Restore(resume_file); + } + + // Run a test pass before doing any training to avoid waiting a potentially + // very long time (param_.test_interval() training iterations) to report that + // there's not enough memory to run the test net and crash, etc.; and to gauge + // the effect of the first training iterations. + if (param_.test_interval()) { + TestAll(); + } + + // For a network that is trained by the solver, no bottom or top vecs + // should be given, and we will just provide dummy vecs. + vector*> bottom_vec; + while (iter_++ < param_.max_iter()) { + Dtype loss = net_->ForwardBackward(bottom_vec); + ComputeUpdateValue(); + net_->Update(); + + if (param_.display() && iter_ % param_.display() == 0) { + LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + TestAll(); + } + // Check if we need to do snapshot + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + Snapshot(); + } + } + // After the optimization is done, always do a snapshot. + iter_--; + Snapshot(); + LOG(INFO) << "Optimization Done."; +} + + +template +void Solver::TestAll() { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + Test(test_net_id); + } +} + + +template +void Solver::Test(const int test_net_id) { + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + // We need to set phase to test before running. 
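+ // The test net reuses the training net's weights (ShareTrainedLayersWith
+ // below); each output blob is accumulated over test_iter forward passes and
+ // reported as a per-iteration average.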
+ Caffe::set_phase(Caffe::TEST); + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + vector test_score; + vector*> bottom_vec; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + Dtype iter_loss; + const vector*>& result = + test_nets_[test_net_id]->Forward(bottom_vec, &iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + if (i == 0) { + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score.push_back(result_vec[k]); + } + } + } else { + int idx = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score[idx++] += result_vec[k]; + } + } + } + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < test_score.size(); ++i) { + LOG(INFO) << "Test score #" << i << ": " + << test_score[i] / param_.test_iter(test_net_id); + } + Caffe::set_phase(Caffe::TRAIN); +} + + +template +void Solver::Snapshot() { + NetParameter net_param; + // For intermediate results, we will also dump the gradient values. + net_->ToProto(&net_param, param_.snapshot_diff()); + string filename(param_.snapshot_prefix()); + const int kBufferSize = 20; + char iter_str_buffer[kBufferSize]; + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + filename += iter_str_buffer; + LOG(INFO) << "Snapshotting to " << filename; + WriteProtoToBinaryFile(net_param, filename.c_str()); + SolverState state; + SnapshotSolverState(&state); + state.set_iter(iter_); + state.set_learned_net(filename); + filename += ".solverstate"; + LOG(INFO) << "Snapshotting solver state to " << filename; + WriteProtoToBinaryFile(state, filename.c_str()); +} + +template +void Solver::Restore(const char* state_file) { + SolverState state; + NetParameter net_param; + ReadProtoFromBinaryFile(state_file, &state); + if (state.has_learned_net()) { + ReadProtoFromBinaryFile(state.learned_net().c_str(), &net_param); + net_->CopyTrainedLayersFrom(net_param); + } + iter_ = state.iter(); + RestoreSolverState(state); +} + + +// Return the current learning rate. The currently implemented learning rate +// policies are as follows: +// - fixed: always return base_lr. +// - step: return base_lr * gamma ^ (floor(iter / step)) +// - exp: return base_lr * gamma ^ iter +// - inv: return base_lr * (1 + gamma * iter) ^ (- power) +// where base_lr, gamma, step and power are defined in the solver parameter +// protocol buffer, and iter is the current iteration. 
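+ //
+ // Illustration only (a hypothetical solver prototxt, not from this
+ // repository): a "step" policy that divides the rate by 10 every 100000
+ // iterations would be configured as
+ //   base_lr: 0.01
+ //   lr_policy: "step"
+ //   gamma: 0.1
+ //   stepsize: 100000
+ // giving rate = 0.01 * 0.1 ^ floor(iter / 100000).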
+template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + int current_step = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() * + pow(this->param_.gamma(), current_step); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() * + pow(Dtype(1) + this->param_.gamma() * this->iter_, + - this->param_.power()); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + + +template +void SGDSolver::PreSolve() { + // Initialize the history + vector > >& net_params = this->net_->params(); + history_.clear(); + for (int i = 0; i < net_params.size(); ++i) { + const Blob* net_param = net_params[i].get(); + history_.push_back(shared_ptr >(new Blob( + net_param->num(), net_param->channels(), net_param->height(), + net_param->width()))); + } +} + + +template +void SGDSolver::ComputeUpdateValue() { + vector > >& net_params = this->net_->params(); + vector& net_params_lr = this->net_->params_lr(); + vector& net_params_weight_decay = this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + Dtype momentum = this->param_.momentum(); + Dtype weight_decay = this->param_.weight_decay(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + if (local_decay) { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay * local_rate, + net_params[param_id]->cpu_data(), + history_[param_id]->mutable_cpu_data()); + } + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
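+ // As in the CPU branch above: history = local_rate * diff + momentum *
+ // history, plus local_rate * local_decay * data for weight decay; the
+ // result is then copied into the blob's diff for net_->Update().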
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + if (local_decay) { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay * local_rate, + net_params[param_id]->gpu_data(), + history_[param_id]->mutable_gpu_data()); + } + // copy + caffe_gpu_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::SnapshotSolverState(SolverState* state) { + state->clear_history(); + for (int i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state->add_history(); + history_[i]->ToProto(history_blob); + } +} + +template +void SGDSolver::RestoreSolverState(const SolverState& state) { + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; + LOG(INFO) << "SGDSolver: restoring history"; + for (int i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +INSTANTIATE_CLASS(Solver); +INSTANTIATE_CLASS(SGDSolver); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/syncedmem.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/syncedmem.cpp new file mode 100644 index 000000000..fec37d6e9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/syncedmem.cpp @@ -0,0 +1,98 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include + +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" + +namespace caffe { + +SyncedMemory::~SyncedMemory() { + if (cpu_ptr_ && own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } + + if (gpu_ptr_) { + CUDA_CHECK(cudaFree(gpu_ptr_)); + } +} + +inline void SyncedMemory::to_cpu() { + switch (head_) { + case UNINITIALIZED: + CaffeMallocHost(&cpu_ptr_, size_); + memset(cpu_ptr_, 0, size_); + head_ = HEAD_AT_CPU; + own_cpu_data_ = true; + break; + case HEAD_AT_GPU: + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_); + own_cpu_data_ = true; + } + CUDA_CHECK(cudaMemcpy(cpu_ptr_, gpu_ptr_, size_, cudaMemcpyDeviceToHost)); + head_ = SYNCED; + break; + case HEAD_AT_CPU: + case SYNCED: + break; + } +} + +inline void SyncedMemory::to_gpu() { + switch (head_) { + case UNINITIALIZED: + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + CUDA_CHECK(cudaMemset(gpu_ptr_, 0, size_)); + head_ = HEAD_AT_GPU; + break; + case HEAD_AT_CPU: + if (gpu_ptr_ == NULL) { + CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); + } + CUDA_CHECK(cudaMemcpy(gpu_ptr_, cpu_ptr_, size_, cudaMemcpyHostToDevice)); + head_ = SYNCED; + break; + case HEAD_AT_GPU: + case SYNCED: + break; + } +} + +const void* SyncedMemory::cpu_data() { + to_cpu(); + return (const void*)cpu_ptr_; +} + +void SyncedMemory::set_cpu_data(void* data) { + CHECK(data); + if (own_cpu_data_) { + CaffeFreeHost(cpu_ptr_); + } + cpu_ptr_ = data; + head_ = HEAD_AT_CPU; + own_cpu_data_ = false; +} + +const void* SyncedMemory::gpu_data() { + to_gpu(); + return (const void*)gpu_ptr_; +} + +void* SyncedMemory::mutable_cpu_data() { + to_cpu(); + head_ = HEAD_AT_CPU; + return cpu_ptr_; +} + +void* SyncedMemory::mutable_gpu_data() { + to_gpu(); + head_ = HEAD_AT_GPU; + return gpu_ptr_; +} + + +} // namespace caffe + diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_argmax_layer.cpp 
b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_argmax_layer.cpp new file mode 100644 index 000000000..c4150e5ab --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_argmax_layer.cpp @@ -0,0 +1,113 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ArgMaxLayerTest : public ::testing::Test { + protected: + ArgMaxLayerTest() + : blob_bottom_(new Blob(20, 10, 1, 1)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ArgMaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ArgMaxLayerTest, Dtypes); + + +TYPED_TEST(ArgMaxLayerTest, TestSetup) { + LayerParameter layer_param; + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), 1); +} + +TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { + LayerParameter layer_param; + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), 2); +} + +TYPED_TEST(ArgMaxLayerTest, TestCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i]; + max_val = bottom_data[i * dim + max_ind]; + for (int j = 0; j < dim; ++j) { + EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); + argmax_param->set_out_max_val(true); + ArgMaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + int max_ind; + TypeParam max_val; + int num = this->blob_bottom_->num(); + int dim = this->blob_bottom_->count() / num; + for (int i = 0; i < num; ++i) { + EXPECT_GE(top_data[i], 0); + EXPECT_LE(top_data[i], dim); + max_ind = top_data[i * 2]; + max_val = 
top_data[i * 2 + 1]; + EXPECT_EQ(bottom_data[i * dim + max_ind], max_val); + for (int j = 0; j < dim; ++j) { + EXPECT_LE(bottom_data[i * dim + j], max_val); + } + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_benchmark.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_benchmark.cpp new file mode 100644 index 000000000..40eee9c80 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_benchmark.cpp @@ -0,0 +1,169 @@ +// Copyright 2014 BVLC and contributors. + +#include // for usleep +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +class BenchmarkTest : public ::testing::Test {}; + +TEST_F(BenchmarkTest, TestTimerConstructorCPU) { + Caffe::set_mode(Caffe::CPU); + Timer timer; + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerConstructorGPU) { + Caffe::set_mode(Caffe::GPU); + Timer timer; + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerStartCPU) { + Caffe::set_mode(Caffe::CPU); + Timer timer; + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerStartGPU) { + Caffe::set_mode(Caffe::GPU); + Timer timer; + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Start(); + EXPECT_TRUE(timer.initted()); + EXPECT_TRUE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerStopCPU) { + Caffe::set_mode(Caffe::CPU); + Timer timer; + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerStopGPU) { + Caffe::set_mode(Caffe::GPU); + Timer timer; + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); + timer.Stop(); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerMilliSecondsCPU) { + Caffe::set_mode(Caffe::CPU); + Timer timer; + CHECK_EQ(timer.MilliSeconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + CHECK_GE(timer.MilliSeconds(), 298); + 
CHECK_LE(timer.MilliSeconds(), 302); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerMilliSecondsGPU) { + Caffe::set_mode(Caffe::GPU); + Timer timer; + CHECK_EQ(timer.MilliSeconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + CHECK_GE(timer.MilliSeconds(), 298); + CHECK_LE(timer.MilliSeconds(), 302); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerSecondsCPU) { + Caffe::set_mode(Caffe::CPU); + Timer timer; + CHECK_EQ(timer.Seconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + CHECK_GE(timer.Seconds(), 0.298); + CHECK_LE(timer.Seconds(), 0.302); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +TEST_F(BenchmarkTest, TestTimerSecondsGPU) { + Caffe::set_mode(Caffe::GPU); + Timer timer; + CHECK_EQ(timer.Seconds(), 0); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_FALSE(timer.has_run_at_least_once()); + timer.Start(); + usleep(300 * 1000); + CHECK_GE(timer.Seconds(), 0.298); + CHECK_LE(timer.Seconds(), 0.302); + EXPECT_TRUE(timer.initted()); + EXPECT_FALSE(timer.running()); + EXPECT_TRUE(timer.has_run_at_least_once()); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_blob.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_blob.cpp new file mode 100644 index 000000000..5d38e54ff --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_blob.cpp @@ -0,0 +1,60 @@ +// Copyright 2014 BVLC and contributors. 
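+
+// Basic Blob tests: construction, Reshape/count bookkeeping, and CPU/GPU
+// data pointer access.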
+ +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/blob.hpp" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class BlobSimpleTest : public ::testing::Test { + protected: + BlobSimpleTest() + : blob_(new Blob()), + blob_preshaped_(new Blob(2, 3, 4, 5)) {} + virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; } + Blob* const blob_; + Blob* const blob_preshaped_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(BlobSimpleTest, Dtypes); + +TYPED_TEST(BlobSimpleTest, TestInitialization) { + EXPECT_TRUE(this->blob_); + EXPECT_TRUE(this->blob_preshaped_); + EXPECT_EQ(this->blob_preshaped_->num(), 2); + EXPECT_EQ(this->blob_preshaped_->channels(), 3); + EXPECT_EQ(this->blob_preshaped_->height(), 4); + EXPECT_EQ(this->blob_preshaped_->width(), 5); + EXPECT_EQ(this->blob_preshaped_->count(), 120); + EXPECT_EQ(this->blob_->num(), 0); + EXPECT_EQ(this->blob_->channels(), 0); + EXPECT_EQ(this->blob_->height(), 0); + EXPECT_EQ(this->blob_->width(), 0); + EXPECT_EQ(this->blob_->count(), 0); +} + +TYPED_TEST(BlobSimpleTest, TestPointers) { + EXPECT_TRUE(this->blob_preshaped_->gpu_data()); + EXPECT_TRUE(this->blob_preshaped_->cpu_data()); + EXPECT_TRUE(this->blob_preshaped_->mutable_gpu_data()); + EXPECT_TRUE(this->blob_preshaped_->mutable_cpu_data()); +} + +TYPED_TEST(BlobSimpleTest, TestReshape) { + this->blob_->Reshape(2, 3, 4, 5); + EXPECT_EQ(this->blob_->num(), 2); + EXPECT_EQ(this->blob_->channels(), 3); + EXPECT_EQ(this->blob_->height(), 4); + EXPECT_EQ(this->blob_->width(), 5); + EXPECT_EQ(this->blob_->count(), 120); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.cpp new file mode 100644 index 000000000..ecc117e3b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.cpp @@ -0,0 +1,32 @@ +// Copyright 2014 BVLC and contributors. + +// The main caffe test code. Your test cpp code should include this hpp +// to allow a main function to be compiled into the binary. + +#include "test_caffe_main.hpp" + +namespace caffe { + cudaDeviceProp CAFFE_TEST_CUDA_PROP; +} + +using caffe::CAFFE_TEST_CUDA_PROP; + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + ::google::InitGoogleLogging(argv[0]); + // Before starting testing, let's first print out a few cuda defice info. + int device; + cudaGetDeviceCount(&device); + cout << "Cuda number of devices: " << device << endl; + if (argc > 1) { + // Use the given device + device = atoi(argv[1]); + cudaSetDevice(device); + cout << "Setting to use device " << device << endl; + } + cudaGetDevice(&device); + cout << "Current device id: " << device << endl; + cudaGetDeviceProperties(&CAFFE_TEST_CUDA_PROP, device); + // invoke the test. + return RUN_ALL_TESTS(); +} diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.hpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.hpp new file mode 100644 index 000000000..df64cbb41 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_caffe_main.hpp @@ -0,0 +1,20 @@ +// Copyright 2014 BVLC and contributors. + +// The main caffe test code. Your test cpp code should include this hpp +// to allow a main function to be compiled into the binary. 
+#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ +#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ + +#include +#include +#include + +#include +#include + +using std::cout; +using std::endl; + +int main(int argc, char** argv); + +#endif // CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_common.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_common.cpp new file mode 100644 index 000000000..13c2d9514 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_common.cpp @@ -0,0 +1,66 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class CommonTest : public ::testing::Test {}; + +TEST_F(CommonTest, TestCublasHandler) { + int cuda_device_id; + CUDA_CHECK(cudaGetDevice(&cuda_device_id)); + EXPECT_TRUE(Caffe::cublas_handle()); +} + +TEST_F(CommonTest, TestBrewMode) { + Caffe::set_mode(Caffe::CPU); + EXPECT_EQ(Caffe::mode(), Caffe::CPU); + Caffe::set_mode(Caffe::GPU); + EXPECT_EQ(Caffe::mode(), Caffe::GPU); +} + +TEST_F(CommonTest, TestPhase) { + Caffe::set_phase(Caffe::TRAIN); + EXPECT_EQ(Caffe::phase(), Caffe::TRAIN); + Caffe::set_phase(Caffe::TEST); + EXPECT_EQ(Caffe::phase(), Caffe::TEST); +} + +TEST_F(CommonTest, TestRandSeedCPU) { + SyncedMemory data_a(10 * sizeof(int)); + SyncedMemory data_b(10 * sizeof(int)); + Caffe::set_random_seed(1701); + caffe_rng_bernoulli(10, 0.5, static_cast(data_a.mutable_cpu_data())); + + Caffe::set_random_seed(1701); + caffe_rng_bernoulli(10, 0.5, static_cast(data_b.mutable_cpu_data())); + + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(static_cast(data_a.cpu_data())[i], + static_cast(data_b.cpu_data())[i]); + } +} + +TEST_F(CommonTest, TestRandSeedGPU) { + SyncedMemory data_a(10 * sizeof(unsigned int)); + SyncedMemory data_b(10 * sizeof(unsigned int)); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + reinterpret_cast(data_a.mutable_gpu_data()), 10)); + Caffe::set_random_seed(1701); + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), + reinterpret_cast(data_b.mutable_gpu_data()), 10)); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], + ((const unsigned int*)(data_b.cpu_data()))[i]); + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_concat_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_concat_layer.cpp new file mode 100644 index 000000000..72e3c902c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_concat_layer.cpp @@ -0,0 +1,130 @@ +// Copyright 2014 BVLC and contributors. 
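+
+// ConcatLayer tests: output shapes when concatenating along num (concat_dim 0)
+// and channels (the default), forward values, and CPU/GPU gradient checks.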
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ConcatLayerTest : public ::testing::Test { + protected: + ConcatLayerTest() + : blob_bottom_0(new Blob(2, 3, 6, 5)), + blob_bottom_1(new Blob(2, 5, 6, 5)), + blob_bottom_2(new Blob(5, 3, 6, 5)), + blob_top_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_0); + filler_param.set_value(2.); + filler.Fill(this->blob_bottom_1); + filler_param.set_value(3.); + filler.Fill(this->blob_bottom_2); + blob_bottom_vec_0.push_back(blob_bottom_0); + blob_bottom_vec_0.push_back(blob_bottom_1); + blob_bottom_vec_1.push_back(blob_bottom_0); + blob_bottom_vec_1.push_back(blob_bottom_2); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ConcatLayerTest() { + delete blob_bottom_0; delete blob_bottom_1; + delete blob_bottom_2; delete blob_top_; + } + + Blob* const blob_bottom_0; + Blob* const blob_bottom_1; + Blob* const blob_bottom_2; + Blob* const blob_top_; + vector*> blob_bottom_vec_0, blob_bottom_vec_1; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ConcatLayerTest, Dtypes); + +TYPED_TEST(ConcatLayerTest, TestSetupNum) { + LayerParameter layer_param; + layer_param.mutable_concat_param()->set_concat_dim(0); + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), + this->blob_bottom_0->num() + this->blob_bottom_2->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); +} + +TYPED_TEST(ConcatLayerTest, TestSetupChannels) { + LayerParameter layer_param; + ConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0->channels()+this->blob_bottom_1->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width()); +} + + +TYPED_TEST(ConcatLayerTest, TestCPUNum) { + LayerParameter layer_param; + ConcatLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_)); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_bottom_0->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_0[0]->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_bottom_1->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c+3, h, w), + this->blob_bottom_vec_0[1]->data_at(n, c, h, w)); + } + } + } + } +} + + +TYPED_TEST(ConcatLayerTest, TestCPUGradient) { + 
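+ // GradientChecker numerically verifies the layer's Backward pass against
+ // finite differences (stepsize 1e-2, error threshold 1e-3).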
LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradient(&layer, &(this->blob_bottom_vec_0), + &(this->blob_top_vec_)); +} + +TYPED_TEST(ConcatLayerTest, TestGPUGradient) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradient(&layer, &(this->blob_bottom_vec_0), + &(this->blob_top_vec_)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_convolution_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_convolution_layer.cpp new file mode 100644 index 000000000..b08486e10 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_convolution_layer.cpp @@ -0,0 +1,288 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ConvolutionLayerTest : public ::testing::Test { + protected: + ConvolutionLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + blob_bottom_->Reshape(2, 3, 6, 4); + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~ConvolutionLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ConvolutionLayerTest, Dtypes); + +TYPED_TEST(ConvolutionLayerTest, TestSetup) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolution) { + // We will simply see if the convolution layer carries out averaging well. 
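+ // With constant input 1, constant 3x3 weights of 1 over 3 input channels,
+ // and bias 0.1, every output element should equal 3*3*3 + 0.1 = 27.1.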
+ FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Caffe::set_mode(Caffe::CPU); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // After the convolution, the output should all have output values 27.1 + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], 27.1, 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolution) { + // We will simply see if the convolution layer carries out averaging well. + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Caffe::set_mode(Caffe::GPU); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // After the convolution, the output should all have output values 27.1 + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], 27.1, 1e-4); + } +} + +TYPED_TEST(ConvolutionLayerTest, TestCPUSimpleConvolutionGroup) { + // We will simply see if the convolution layer carries out averaging well. 
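+ // Here each input pixel is set to its channel index and group = 3, so the
+ // c-th output channel should be c * 9 + 0.1 (see the EXPECT_NEAR below).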
+ FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data(); + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + bottom_data[this->blob_bottom_->offset(n, c, h, w)] = c; + } + } + } + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Caffe::set_mode(Caffe::CPU); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // After the convolution, the output should all have output values 9.1 + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)]; + EXPECT_NEAR(data, c * 9 + 0.1, 1e-4); + } + } + } + } +} + + +TYPED_TEST(ConvolutionLayerTest, TestGPUSimpleConvolutionGroup) { + // We will simply see if the convolution layer carries out averaging well. 
+ FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + TypeParam* bottom_data = this->blob_bottom_->mutable_cpu_data(); + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + for (int c = 0; c < this->blob_bottom_->channels(); ++c) { + for (int h = 0; h < this->blob_bottom_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_->width(); ++w) { + bottom_data[this->blob_bottom_->offset(n, c, h, w)] = c; + } + } + } + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new ConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Caffe::set_mode(Caffe::GPU); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // After the convolution, the output should all have output values 9.1 + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + TypeParam data = top_data[this->blob_top_->offset(n, c, h, w)]; + EXPECT_NEAR(data, c * 9 + 0.1, 1e-4); + } + } + } + } +} + + +TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + Caffe::set_mode(Caffe::CPU); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + Caffe::set_mode(Caffe::CPU); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + Caffe::set_mode(Caffe::GPU); + ConvolutionLayer layer(layer_param); + GradientChecker 
checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + Caffe::set_mode(Caffe::GPU); + ConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/generate_sample_data.py b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/generate_sample_data.py new file mode 100644 index 000000000..0d8f5aa98 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/generate_sample_data.py @@ -0,0 +1,39 @@ +""" +Generate data used in the HDF5DataLayer test. +""" +import os +import numpy as np +import h5py + +num_cols = 8 +num_rows = 10 +height = 5 +width = 5 +total_size = num_cols * num_rows * height * width + +data = np.arange(total_size) +data = data.reshape(num_rows, num_cols, height, width) +data = data.astype('float32') +label = np.arange(num_rows)[:, np.newaxis] +label = label.astype('float32') + +print data +print label + +with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: + f['data'] = data + f['label'] = label + +with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: + f.create_dataset( + 'data', data=data + total_size, + compression='gzip', compression_opts=1 + ) + f.create_dataset( + 'label', data=label, + compression='gzip', compression_opts=1 + ) + +with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: + f.write(os.path.dirname(__file__) + '/sample_data.h5\n') + f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data.h5 b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data.h5 new file mode 100644 index 000000000..a1f923a71 Binary files /dev/null and b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data.h5 differ diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5 b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5 new file mode 100644 index 000000000..56c0a740e Binary files /dev/null and b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_2_gzip.h5 differ diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_list.txt b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_list.txt new file mode 100644 index 000000000..cdf343fc9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data/sample_data_list.txt @@ -0,0 +1,2 @@ +src/caffe/test/test_data/sample_data.h5 +src/caffe/test/test_data/sample_data_2_gzip.h5 diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data_layer.cpp new file mode 100644 index 000000000..68f2618d3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_data_layer.cpp @@ -0,0 
+1,487 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "cuda_runtime.h" +#include "leveldb/db.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/test/test_caffe_main.hpp" + +using std::string; +using std::stringstream; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class DataLayerTest : public ::testing::Test { + protected: + DataLayerTest() + : blob_top_data_(new Blob()), + blob_top_label_(new Blob()), + filename_(new string(tmpnam(NULL))), + backend_(DataParameter_DB_LEVELDB), + seed_(1701) {} + virtual void SetUp() { + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + } + + // Fill the LevelDB with data: if unique_pixels, each pixel is unique but + // all images are the same; else each image is unique but all pixels within + // an image are the same. + void FillLevelDB(const bool unique_pixels) { + backend_ = DataParameter_DB_LEVELDB; + LOG(INFO) << "Using temporary leveldb " << *filename_; + leveldb::DB* db; + leveldb::Options options; + options.error_if_exists = true; + options.create_if_missing = true; + leveldb::Status status = + leveldb::DB::Open(options, filename_->c_str(), &db); + CHECK(status.ok()); + for (int i = 0; i < 5; ++i) { + Datum datum; + datum.set_label(i); + datum.set_channels(2); + datum.set_height(3); + datum.set_width(4); + std::string* data = datum.mutable_data(); + for (int j = 0; j < 24; ++j) { + int datum = unique_pixels ? j : i; + data->push_back(static_cast(datum)); + } + stringstream ss; + ss << i; + db->Put(leveldb::WriteOptions(), ss.str(), datum.SerializeAsString()); + } + delete db; + } + + // Fill the LMDB with data: unique_pixels has same meaning as in FillLevelDB. + void FillLMDB(const bool unique_pixels) { + backend_ = DataParameter_DB_LMDB; + LOG(INFO) << "Using temporary lmdb " << *filename_; + CHECK_EQ(mkdir(filename_->c_str(), 0744), 0) << "mkdir " << filename_ + << "failed"; + MDB_env *env; + MDB_dbi dbi; + MDB_val mdbkey, mdbdata; + MDB_txn *txn; + CHECK_EQ(mdb_env_create(&env), MDB_SUCCESS) << "mdb_env_create failed"; + CHECK_EQ(mdb_env_set_mapsize(env, 1099511627776), MDB_SUCCESS) // 1TB + << "mdb_env_set_mapsize failed"; + CHECK_EQ(mdb_env_open(env, filename_->c_str(), 0, 0664), MDB_SUCCESS) + << "mdb_env_open failed"; + CHECK_EQ(mdb_txn_begin(env, NULL, 0, &txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + CHECK_EQ(mdb_open(txn, NULL, 0, &dbi), MDB_SUCCESS) << "mdb_open failed"; + + for (int i = 0; i < 5; ++i) { + Datum datum; + datum.set_label(i); + datum.set_channels(2); + datum.set_height(3); + datum.set_width(4); + std::string* data = datum.mutable_data(); + for (int j = 0; j < 24; ++j) { + int datum = unique_pixels ? 
j : i; + data->push_back(static_cast(datum)); + } + stringstream ss; + ss << i; + + string value; + datum.SerializeToString(&value); + mdbdata.mv_size = value.size(); + mdbdata.mv_data = reinterpret_cast(&value[0]); + string keystr = ss.str(); + mdbkey.mv_size = keystr.size(); + mdbkey.mv_data = reinterpret_cast(&keystr[0]); + CHECK_EQ(mdb_put(txn, dbi, &mdbkey, &mdbdata, 0), MDB_SUCCESS) + << "mdb_put failed"; + } + CHECK_EQ(mdb_txn_commit(txn), MDB_SUCCESS) << "mdb_txn_commit failed"; + mdb_close(env, dbi); + mdb_env_close(env); + } + + void TestRead() { + const Dtype scale = 3; + LayerParameter param; + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_scale(scale); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + DataLayer layer(param); + layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 5); + EXPECT_EQ(blob_top_data_->channels(), 2); + EXPECT_EQ(blob_top_data_->height(), 3); + EXPECT_EQ(blob_top_data_->width(), 4); + EXPECT_EQ(blob_top_label_->num(), 5); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < 100; ++iter) { + layer.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 24; ++j) { + EXPECT_EQ(scale * i, blob_top_data_->cpu_data()[i * 24 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReadCrop() { + const Dtype scale = 3; + LayerParameter param; + Caffe::set_random_seed(1701); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_scale(scale); + data_param->set_crop_size(1); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + DataLayer layer(param); + layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 5); + EXPECT_EQ(blob_top_data_->channels(), 2); + EXPECT_EQ(blob_top_data_->height(), 1); + EXPECT_EQ(blob_top_data_->width(), 1); + EXPECT_EQ(blob_top_label_->num(), 5); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_with_center_value = 0; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + const Dtype center_value = scale * (j ? 17 : 5); + num_with_center_value += + (center_value == blob_top_data_->cpu_data()[i * 2 + j]); + // At TEST time, check that we always get center value. + if (Caffe::phase() == Caffe::TEST) { + EXPECT_EQ(center_value, this->blob_top_data_->cpu_data()[i * 2 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + // At TRAIN time, check that we did not get the center crop all 10 times. + // (This check fails with probability 1-1/12^10 in a correct + // implementation, so we call set_random_seed.) 
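+      // With 3x4 images and crop_size 1 there are 12 possible crop
+      // positions, and each of the 5 images in the batch draws its offset
+      // independently at TRAIN time, so it is overwhelmingly unlikely that
+      // all 10 counted values hit the center value; the fixed seed above
+      // only makes the outcome deterministic across runs.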
+ if (Caffe::phase() == Caffe::TRAIN) { + EXPECT_LT(num_with_center_value, 10); + } + } + } + + void TestReadCropTrainSequenceSeeded() { + LayerParameter param; + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_crop_size(1); + data_param->set_mirror(true); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + // Get crop sequence with Caffe seed 1701. + Caffe::set_random_seed(seed_); + vector > crop_sequence; + { + DataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, &blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the leveldb + + // Get crop sequence after reseeding Caffe with 1701. + // Check that the sequence is the same as the original. + Caffe::set_random_seed(seed_); + DataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, &blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + EXPECT_EQ(crop_sequence[iter][i * 2 + j], + blob_top_data_->cpu_data()[i * 2 + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReadCropTrainSequenceUnseeded() { + LayerParameter param; + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(5); + data_param->set_crop_size(1); + data_param->set_mirror(true); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + // Get crop sequence with Caffe seed 1701, srand seed 1701. + Caffe::set_random_seed(seed_); + srand(seed_); + vector > crop_sequence; + { + DataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, &blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the leveldb + + // Get crop sequence continuing from previous Caffe RNG state; reseed + // srand with 1701. Check that the sequence differs from the original. 
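+    // Caffe's crop/mirror randomness comes from Caffe's own RNG (seeded via
+    // Caffe::set_random_seed), not from the C library RNG, so reseeding
+    // srand alone should not reproduce layer1's crop sequence; the
+    // EXPECT_LT on num_sequence_matches below depends on that.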
+ srand(seed_); + DataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, &blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, &blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_sequence_matches = 0; + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 2; ++j) { + num_sequence_matches += (crop_sequence[iter][i * 2 + j] == + blob_top_data_->cpu_data()[i * 2 + j]); + } + } + EXPECT_LT(num_sequence_matches, 10); + } + } + + virtual ~DataLayerTest() { delete blob_top_data_; delete blob_top_label_; } + + DataParameter_DB backend_; + shared_ptr filename_; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int seed_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(DataLayerTest, Dtypes); + +TYPED_TEST(DataLayerTest, TestReadLevelDBCPU) { + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = false; // all pixels the same; images different + this->FillLevelDB(unique_pixels); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReadLevelDBGPU) { + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = false; // all pixels the same; images different + this->FillLevelDB(unique_pixels); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCrop(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLevelDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCrop(); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLevelDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCropTrainSequenceUnseeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). 
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLevelDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBCPU) { + Caffe::set_phase(Caffe::TEST); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCrop(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLevelDBGPU) { + Caffe::set_phase(Caffe::TEST); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLevelDB(unique_pixels); + this->TestReadCrop(); +} + +TYPED_TEST(DataLayerTest, TestReadLMDBCPU) { + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = false; // all pixels the same; images different + this->FillLMDB(unique_pixels); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReadLMDBGPU) { + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = false; // all pixels the same; images different + this->FillLMDB(unique_pixels); + this->TestRead(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCrop(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTrainLMDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCrop(); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceSeededLMDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). +TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBCPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCropTrainSequenceUnseeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). 
+TYPED_TEST(DataLayerTest, TestReadCropTrainSequenceUnseededLMDBGPU) { + Caffe::set_phase(Caffe::TRAIN); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLMDBCPU) { + Caffe::set_phase(Caffe::TEST); + Caffe::set_mode(Caffe::CPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCrop(); +} + +TYPED_TEST(DataLayerTest, TestReadCropTestLMDBGPU) { + Caffe::set_phase(Caffe::TEST); + Caffe::set_mode(Caffe::GPU); + const bool unique_pixels = true; // all images the same; pixels different + this->FillLMDB(unique_pixels); + this->TestReadCrop(); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_dummy_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_dummy_data_layer.cpp new file mode 100644 index 000000000..7d9287e86 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_dummy_data_layer.cpp @@ -0,0 +1,202 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/test/test_caffe_main.hpp" + +using std::string; +using std::stringstream; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class DummyDataLayerTest : public ::testing::Test { + protected: + DummyDataLayerTest() + : blob_top_a_(new Blob()), + blob_top_b_(new Blob()), + blob_top_c_(new Blob()) {} + + virtual void SetUp() { + blob_bottom_vec_.clear(); + blob_top_vec_.clear(); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + blob_top_vec_.push_back(blob_top_c_); + } + + virtual ~DummyDataLayerTest() { + delete blob_top_a_; + delete blob_top_b_; + delete blob_top_c_; + } + + Blob* const blob_top_a_; + Blob* const blob_top_b_; + Blob* const blob_top_c_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(DummyDataLayerTest, Dtypes); + +TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { + Caffe::set_mode(Caffe::CPU); + LayerParameter param; + DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + this->blob_top_vec_.resize(1); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->count(), 0); + EXPECT_EQ(this->blob_top_c_->count(), 0); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); + } + } + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); + } + } +} + +TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { + Caffe::set_mode(Caffe::CPU); + LayerParameter param; + DummyDataParameter* dummy_data_param = 
param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + dummy_data_param->add_num(5); + // Don't explicitly set number of channels or height for 2nd top blob; should + // default to first channels and height (as we check later). + dummy_data_param->add_height(1); + FillerParameter* data_filler_param = dummy_data_param->add_data_filler(); + data_filler_param->set_value(7); + this->blob_top_vec_.resize(2); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->num(), 5); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 1); + EXPECT_EQ(this->blob_top_b_->width(), 4); + EXPECT_EQ(this->blob_top_c_->count(), 0); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); + } + } + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + for (int i = 0; i < this->blob_top_vec_.size(); ++i) { + for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { + EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); + } + } +} + +TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { + Caffe::set_mode(Caffe::CPU); + LayerParameter param; + DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); + dummy_data_param->add_num(5); + dummy_data_param->add_channels(3); + dummy_data_param->add_height(2); + dummy_data_param->add_width(4); + FillerParameter* data_filler_param_a = dummy_data_param->add_data_filler(); + data_filler_param_a->set_value(7); + FillerParameter* data_filler_param_b = dummy_data_param->add_data_filler(); + data_filler_param_b->set_type("gaussian"); + TypeParam gaussian_mean = 3.0; + TypeParam gaussian_std = 0.01; + data_filler_param_b->set_mean(gaussian_mean); + data_filler_param_b->set_std(gaussian_std); + FillerParameter* data_filler_param_c = dummy_data_param->add_data_filler(); + data_filler_param_c->set_value(9); + DummyDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 5); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 2); + EXPECT_EQ(this->blob_top_a_->width(), 4); + EXPECT_EQ(this->blob_top_b_->num(), 5); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 2); + EXPECT_EQ(this->blob_top_b_->width(), 4); + EXPECT_EQ(this->blob_top_c_->num(), 5); + EXPECT_EQ(this->blob_top_c_->channels(), 3); + EXPECT_EQ(this->blob_top_c_->height(), 2); + EXPECT_EQ(this->blob_top_c_->width(), 4); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + // Blob b uses a Gaussian filler, so SetUp should not have initialized it. + // Blob b's data should therefore be the default Blob data value: 0. + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_EQ(0, this->blob_top_b_->cpu_data()[i]); + } + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } + + // Do a Forward pass to fill in Blob b with Gaussian data. 
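+  // The constant tops should still read 7 and 9 after Forward, while the
+  // Gaussian top is refilled on every Forward pass, which is why the
+  // first/last samples recorded below are expected to change on the second
+  // pass. With mean 3.0 and std 0.01, the 10-standard-deviation tolerance
+  // used below only asserts |value - 3.0| <= 0.1.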
+ layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + // Check that the Gaussian's data has been filled in with values within + // 10 standard deviations of the mean. Record the first and last sample. + // to check that they're different after the next Forward pass. + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], + gaussian_std * 10); + } + const TypeParam first_gaussian_sample = this->blob_top_b_->cpu_data()[0]; + const TypeParam last_gaussian_sample = + this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]; + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } + + // Do another Forward pass to fill in Blob b with Gaussian data again, + // checking that we get different values. + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + for (int i = 0; i < this->blob_top_a_->count(); ++i) { + EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); + } + for (int i = 0; i < this->blob_top_b_->count(); ++i) { + EXPECT_NEAR(gaussian_mean, this->blob_top_b_->cpu_data()[i], + gaussian_std * 10); + } + EXPECT_NE(first_gaussian_sample, this->blob_top_b_->cpu_data()[0]); + EXPECT_NE(last_gaussian_sample, + this->blob_top_b_->cpu_data()[this->blob_top_b_->count() - 1]); + for (int i = 0; i < this->blob_top_c_->count(); ++i) { + EXPECT_EQ(9, this->blob_top_c_->cpu_data()[i]); + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_eltwise_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_eltwise_layer.cpp new file mode 100644 index 000000000..5f72f625d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_eltwise_layer.cpp @@ -0,0 +1,251 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class EltwiseLayerTest : public ::testing::Test { + protected: + EltwiseLayerTest() + : blob_bottom_a_(new Blob(2, 3, 4, 5)), + blob_bottom_b_(new Blob(2, 3, 4, 5)), + blob_bottom_c_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_a_); + filler.Fill(this->blob_bottom_b_); + filler.Fill(this->blob_bottom_c_); + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_bottom_vec_.push_back(blob_bottom_c_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~EltwiseLayerTest() { + delete blob_bottom_a_; + delete blob_bottom_b_; + delete blob_bottom_c_; + delete blob_top_; + } + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_bottom_c_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(EltwiseLayerTest, Dtypes); + +TYPED_TEST(EltwiseLayerTest, TestSetUp) { + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +TYPED_TEST(EltwiseLayerTest, TestProdCPU) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); + } +} + +TYPED_TEST(EltwiseLayerTest, TestSumCPU) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); + } +} + 
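+// The coefficient variant below computes an elementwise weighted sum,
+// top[i] = 1*a[i] - 0.5*b[i] + 2*c[i]; e.g. a=2, b=4, c=1 gives
+// 2 - 2 + 2 = 2. The result is checked with EXPECT_NEAR (tolerance 1e-4)
+// rather than EXPECT_EQ to allow for floating-point rounding.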
+TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPU) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i], + 1e-4); + } +} + +TYPED_TEST(EltwiseLayerTest, TestProdGPU) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); + } +} + +TYPED_TEST(EltwiseLayerTest, TestSumGPU) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); + } +} + +TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPU) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + shared_ptr > layer( + new EltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const TypeParam* in_data_a = this->blob_bottom_a_->cpu_data(); + const TypeParam* in_data_b = this->blob_bottom_b_->cpu_data(); + const TypeParam* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 
2*in_data_c[i], + 1e-4); + } +} + +TYPED_TEST(EltwiseLayerTest, TestProdCPUGradient) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(EltwiseLayerTest, TestSumCPUGradient) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(EltwiseLayerTest, TestSumCoeffCPUGradient) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(EltwiseLayerTest, TestSumGPUGradient) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(EltwiseLayerTest, TestSumCoeffGPUGradient) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + EltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_euclidean_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_euclidean_loss_layer.cpp new file mode 100644 index 000000000..d5e4107ac --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_euclidean_loss_layer.cpp @@ -0,0 +1,59 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class EuclideanLossLayerTest : public ::testing::Test { + protected: + EuclideanLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 5, 1, 1)) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + filler.Fill(this->blob_bottom_label_); + blob_bottom_vec_.push_back(blob_bottom_label_); + } + virtual ~EuclideanLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(EuclideanLossLayerTest, Dtypes); + +TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + EuclideanLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_filler.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_filler.cpp new file mode 100644 index 000000000..e8b556a66 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_filler.cpp @@ -0,0 +1,149 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/filler.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +typedef ::testing::Types Dtypes; + +template +class ConstantFillerTest : public ::testing::Test { + protected: + ConstantFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_value(10.); + filler_.reset(new ConstantFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~ConstantFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(ConstantFillerTest, Dtypes); + +TYPED_TEST(ConstantFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], this->filler_param_.value()); + } +} + + +template +class UniformFillerTest : public ::testing::Test { + protected: + UniformFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_min(1.); + filler_param_.set_max(2.); + filler_.reset(new UniformFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~UniformFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(UniformFillerTest, Dtypes); + +TYPED_TEST(UniformFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], this->filler_param_.min()); + EXPECT_LE(data[i], this->filler_param_.max()); + } +} + +template +class PositiveUnitballFillerTest : public ::testing::Test { + protected: + PositiveUnitballFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_.reset(new PositiveUnitballFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~PositiveUnitballFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(PositiveUnitballFillerTest, Dtypes); + +TYPED_TEST(PositiveUnitballFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int num = this->blob_->num(); + const int count = this->blob_->count(); + const int dim = count / num; + const TypeParam* data = this->blob_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 0); + EXPECT_LE(data[i], 1); + } + for (int i = 0; i < num; ++i) { + TypeParam sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + } +} + +template +class GaussianFillerTest : public ::testing::Test { + protected: + GaussianFillerTest() + : blob_(new Blob(2, 3, 4, 5)), + filler_param_() { + filler_param_.set_mean(10.); + filler_param_.set_std(0.1); + filler_.reset(new GaussianFiller(filler_param_)); + filler_->Fill(blob_); + } + virtual ~GaussianFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(GaussianFillerTest, Dtypes); + +TYPED_TEST(GaussianFillerTest, TestFill) { + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const TypeParam* data = this->blob_->cpu_data(); + TypeParam mean = 0.; + TypeParam var = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + var += (data[i] - this->filler_param_.mean()) * + (data[i] - this->filler_param_.mean()); + } + mean /= count; + var /= count; + // Very loose test. 
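+  // With mean 10 and std 0.1, these bounds only require the sample mean to
+  // fall in [9.5, 10.5] and the sample variance in [0.002, 0.05] (target
+  // variance 0.01); this is a sanity check, not a statistical test of
+  // Gaussianity.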
+ EXPECT_GE(mean, this->filler_param_.mean() - this->filler_param_.std() * 5); + EXPECT_LE(mean, this->filler_param_.mean() + this->filler_param_.std() * 5); + TypeParam target_var = this->filler_param_.std() * this->filler_param_.std(); + EXPECT_GE(var, target_var / 5.); + EXPECT_LE(var, target_var * 5.); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_flatten_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_flatten_layer.cpp new file mode 100644 index 000000000..52c567b02 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_flatten_layer.cpp @@ -0,0 +1,101 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class FlattenLayerTest : public ::testing::Test { + protected: + FlattenLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~FlattenLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(FlattenLayerTest, Dtypes); + +TYPED_TEST(FlattenLayerTest, TestSetup) { + LayerParameter layer_param; + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(FlattenLayerTest, TestCPU) { + LayerParameter layer_param; + FlattenLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int c = 0; c < 3 * 6 * 5; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); + EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), + this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5)); + } +} + +TYPED_TEST(FlattenLayerTest, TestGPU) { + LayerParameter layer_param; + FlattenLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int c = 0; c < 3 * 6 * 5; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); + EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0), + this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5)); + } +} + +TYPED_TEST(FlattenLayerTest, TestCPUGradient) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + FlattenLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(FlattenLayerTest, TestGPUGradient) { + LayerParameter layer_param; + 
Caffe::set_mode(Caffe::GPU); + FlattenLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_format.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_format.cpp new file mode 100644 index 000000000..4606df0af --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_format.cpp @@ -0,0 +1,67 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include + +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/blob.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/format.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { +using std::string; + +template +class FormatTest : public ::testing::Test { + protected: + FormatTest() : image_file_path_("src/caffe/test/test_data/lena.png") { + } + virtual ~FormatTest() {} + string image_file_path_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(FormatTest, Dtypes); + +TYPED_TEST(FormatTest, TestOpenCVImageToDatum) { + cv::Mat cv_img = cv::imread(this->image_file_path_, CV_LOAD_IMAGE_COLOR); + Datum* datum; + int label = 1001; + string data; + int index; + datum = new Datum(); + OpenCVImageToDatum(cv_img, label, 128, 256, datum); + EXPECT_EQ(datum->channels(), 3); + EXPECT_EQ(datum->height(), 128); + EXPECT_EQ(datum->width(), 256); + EXPECT_EQ(datum->label(), label); + delete datum; + // Cases without resizing + int heights[] = {-1, 0, cv_img.rows, cv_img.rows, cv_img.rows}; + int widths[] = {cv_img.cols, cv_img.cols, 0, -1, cv_img.cols}; + for (int i = 0; i < 3; ++i) { + datum = new Datum(); + OpenCVImageToDatum(cv_img, ++label, heights[i], widths[i], datum); + EXPECT_EQ(datum->channels(), 3); + EXPECT_EQ(datum->height(), cv_img.rows); + EXPECT_EQ(datum->width(), cv_img.cols); + EXPECT_EQ(datum->label(), label); + data = datum->data(); + index = 0; + for (int c = 0; c < 3; ++c) { + for (int h = 0; h < cv_img.rows; ++h) { + for (int w = 0; w < cv_img.cols; ++w) { + EXPECT_EQ(static_cast(cv_img.at(h, w)[c]), + data[index++]); + } + } + } + delete datum; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_gradient_check_util.hpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_gradient_check_util.hpp new file mode 100644 index 000000000..bcf03973d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_gradient_check_util.hpp @@ -0,0 +1,249 @@ +// Copyright 2014 BVLC and contributors. + +#ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ +#define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ + +#include +#include + +#include +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/net.hpp" + +using std::max; + +namespace caffe { + +// The gradient checker adds a L2 normalization loss function on top of the +// top blobs, and checks the gradient. +template +class GradientChecker { + public: + // kink and kink_range specify an ignored nonsmooth region of the form + // kink - kink_range <= |feature value| <= kink + kink_range, + // which accounts for all nonsmoothness in use by caffe + GradientChecker(const Dtype stepsize, const Dtype threshold, + const unsigned int seed = 1701, const Dtype kink = 0., + const Dtype kink_range = -1) + : stepsize_(stepsize), threshold_(threshold), seed_(seed), + kink_(kink), kink_range_(kink_range) {} + // Checks the gradient of a layer, with provided bottom layers and top + // layers. 
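+  // Typical usage from a layer test (as in the eltwise and convolution
+  // layer tests in this module), where bottom_vec and top_vec are the
+  // test's blob vectors:
+  //   GradientChecker<Dtype> checker(1e-2, 1e-3);  // stepsize, threshold
+  //   checker.CheckGradientExhaustive(&layer, &bottom_vec, &top_vec);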
+ // Note that after the gradient check, we do not guarantee that the data + // stored in the layer parameters and the blobs are unchanged. + void CheckGradient(Layer* layer, vector*>* bottom, + vector*>* top, int check_bottom = -1) { + layer->SetUp(*bottom, top); + CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); + } + void CheckGradientExhaustive(Layer* layer, + vector*>* bottom, vector*>* top, + int check_bottom = -1); + + // CheckGradientEltwise can be used to test layers that perform element-wise + // computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when + // i != j. + void CheckGradientEltwise(Layer* layer, + vector*>* bottom, vector*>* top); + + void CheckGradientSingle(Layer* layer, vector*>* bottom, + vector*>* top, int check_bottom, int top_id, + int top_data_id, bool element_wise = false); + + // Checks the gradient of a network. This network should not have any data + // layers or loss layers, since the function does not explicitly deal with + // such cases yet. All input blobs and parameter blobs are going to be + // checked, layer-by-layer to avoid numerical problems to accumulate. + void CheckGradientNet(const Net& net, + const vector*>& input); + + protected: + Dtype GetObjAndGradient(vector*>* top, int top_id = -1, + int top_data_id = -1); + Dtype stepsize_; + Dtype threshold_; + unsigned int seed_; + Dtype kink_; + Dtype kink_range_; +}; + + +template +void GradientChecker::CheckGradientSingle(Layer* layer, + vector*>* bottom, vector*>* top, + int check_bottom, int top_id, int top_data_id, bool element_wise) { + if (element_wise) { + CHECK_EQ(0, layer->blobs().size()); + CHECK_LE(0, top_id); + CHECK_LE(0, top_data_id); + const int top_count = (*top)[top_id]->count(); + for (int blob_id = 0; blob_id < bottom->size(); ++blob_id) { + CHECK_EQ(top_count, (*bottom)[blob_id]->count()); + } + } + // First, figure out what blobs we need to check against. + vector*> blobs_to_check; + for (int i = 0; i < layer->blobs().size(); ++i) { + blobs_to_check.push_back(layer->blobs()[i].get()); + } + if (check_bottom < 0) { + for (int i = 0; i < bottom->size(); ++i) { + blobs_to_check.push_back((*bottom)[i]); + } + } else { + CHECK(check_bottom < bottom->size()); + blobs_to_check.push_back((*bottom)[check_bottom]); + } + // Compute the gradient analytically using Backward + Caffe::set_random_seed(seed_); + // Get any loss from the layer + Dtype computed_objective = layer->Forward(*bottom, top); + // Get additional loss from the objective + computed_objective += GetObjAndGradient(top, top_id, top_data_id); + layer->Backward(*top, true, bottom); + // Store computed gradients for all checked blobs + vector > > + computed_gradient_blobs(blobs_to_check.size()); + for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + Blob* current_blob = blobs_to_check[blob_id]; + computed_gradient_blobs[blob_id].reset(new Blob()); + computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob); + const int count = blobs_to_check[blob_id]->count(); + const Dtype* diff = blobs_to_check[blob_id]->cpu_diff(); + Dtype* computed_gradients = + computed_gradient_blobs[blob_id]->mutable_cpu_data(); + caffe_copy(count, diff, computed_gradients); + } + // Compute derivative of top w.r.t. each bottom and parameter input using + // finite differencing. 
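+  // Each checked input element x is perturbed to x + stepsize_ and
+  // x - stepsize_, and the central difference
+  //   (f(x + stepsize_) - f(x - stepsize_)) / (2 * stepsize_)
+  // is compared against the analytic gradient stored above.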
+ // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs."; + for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) { + Blob* current_blob = blobs_to_check[blob_id]; + const Dtype* computed_gradients = + computed_gradient_blobs[blob_id]->cpu_data(); + // LOG(ERROR) << "Blob " << blob_id << ": checking " + // << current_blob->count() << " parameters."; + for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) { + // For an element-wise layer, we only need to do finite differencing to + // compute the derivative of (*top)[top_id][top_data_id] w.r.t. + // (*bottom)[blob_id][i] only for i == top_data_id. For any other + // i != top_data_id, we know the derivative is 0 by definition, and simply + // check that that's true. + Dtype estimated_gradient = 0; + if (!element_wise || (feat_id == top_data_id)) { + // Do finite differencing. + // Compute loss with stepsize_ added to input. + current_blob->mutable_cpu_data()[feat_id] += stepsize_; + Caffe::set_random_seed(seed_); + Dtype positive_objective = layer->Forward(*bottom, top); + positive_objective += GetObjAndGradient(top, top_id, top_data_id); + // Compute loss with stepsize_ subtracted from input. + current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2; + Caffe::set_random_seed(seed_); + Dtype negative_objective = layer->Forward(*bottom, top); + negative_objective += GetObjAndGradient(top, top_id, top_data_id); + // Recover original input value. + current_blob->mutable_cpu_data()[feat_id] += stepsize_; + estimated_gradient = (positive_objective - negative_objective) / + stepsize_ / 2.; + } + Dtype computed_gradient = computed_gradients[feat_id]; + Dtype feature = current_blob->cpu_data()[feat_id]; + // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " " + // << current_blob->cpu_diff()[feat_id]; + if (kink_ - kink_range_ > fabs(feature) + || fabs(feature) > kink_ + kink_range_) { + // We check relative accuracy, but for too small values, we threshold + // the scale factor by 1. 
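+        // Concretely, the EXPECT_NEAR below requires
+        //   |computed - estimated| <= threshold_ * max(|computed|, |estimated|, 1),
+        // i.e. relative accuracy for large gradients and absolute accuracy
+        // near zero.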
+ Dtype scale = max( + max(fabs(computed_gradient), fabs(estimated_gradient)), 1.); + EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale) + << "debug: (top_id, top_data_id, blob_id, feat_id)=" + << top_id << "," << top_data_id << "," << blob_id << "," << feat_id; + } + // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id]; + // LOG(ERROR) << "computed gradient: " << computed_gradient + // << " estimated_gradient: " << estimated_gradient; + } + } +} + +template +void GradientChecker::CheckGradientExhaustive(Layer* layer, + vector*>* bottom, vector*>* top, int check_bottom) { + layer->SetUp(*bottom, top); + CHECK_GT(top->size(), 0) << "Exhaustive mode requires at least one top blob."; + // LOG(ERROR) << "Exhaustive Mode."; + for (int i = 0; i < top->size(); ++i) { + // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count(); + for (int j = 0; j < (*top)[i]->count(); ++j) { + // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j; + CheckGradientSingle(layer, bottom, top, check_bottom, i, j); + } + } +} + +template +void GradientChecker::CheckGradientEltwise(Layer* layer, + vector*>* bottom, vector*>* top) { + layer->SetUp(*bottom, top); + CHECK_GT(top->size(), 0) << "Eltwise mode requires at least one top blob."; + const int check_bottom = -1; + const bool element_wise = true; + for (int i = 0; i < top->size(); ++i) { + for (int j = 0; j < (*top)[i]->count(); ++j) { + CheckGradientSingle(layer, bottom, top, check_bottom, i, j, element_wise); + } + } +} + +template +void GradientChecker::CheckGradientNet( + const Net& net, const vector*>& input) { + const vector > >& layers = net.layers(); + vector*> >& bottom_vecs = net.bottom_vecs(); + vector*> >& top_vecs = net.top_vecs(); + for (int i = 0; i < layers.size(); ++i) { + net.Forward(input); + LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name(); + CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]); + } +} + +template +Dtype GradientChecker::GetObjAndGradient(vector*>* top, + int top_id, int top_data_id) { + Dtype loss = 0; + if (top_id < 0) { + // the loss will be half of the sum of squares of all outputs + for (int i = 0; i < top->size(); ++i) { + Blob* top_blob = (*top)[i]; + const Dtype* top_blob_data = top_blob->cpu_data(); + Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); + int count = top_blob->count(); + for (int j = 0; j < count; ++j) { + loss += top_blob_data[j] * top_blob_data[j]; + } + // set the diff: simply the data. + memcpy(top_blob_diff, top_blob_data, sizeof(Dtype) * top_blob->count()); + } + loss /= 2.; + } else { + // the loss will be the top_data_id-th element in the top_id-th blob. + for (int i = 0; i < top->size(); ++i) { + Blob* top_blob = (*top)[i]; + Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); + memset(top_blob_diff, 0, sizeof(Dtype) * top_blob->count()); + } + loss = (*top)[top_id]->cpu_data()[top_data_id]; + (*top)[top_id]->mutable_cpu_diff()[top_data_id] = 1.; + } + return loss; +} + +} // namespace caffe + +#endif // CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5_output_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5_output_layer.cpp new file mode 100644 index 000000000..9f793f2fc --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5_output_layer.cpp @@ -0,0 +1,125 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include +#include +#include + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/test/test_caffe_main.hpp" + +using std::string; +using std::vector; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class HDF5OutputLayerTest : public ::testing::Test { + protected: + HDF5OutputLayerTest() + : output_file_name_(tmpnam(NULL)), + input_file_name_("src/caffe/test/test_data/sample_data.h5"), + blob_data_(new Blob()), + blob_label_(new Blob()), + num_(5), + channels_(8), + height_(5), + width_(5) {} + + virtual ~HDF5OutputLayerTest() { + delete blob_data_; + delete blob_label_; + } + + void CheckBlobEqual(const Blob& b1, const Blob& b2); + + string output_file_name_; + string input_file_name_; + Blob* const blob_data_; + Blob* const blob_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int num_; + int channels_; + int height_; + int width_; +}; + +template +void HDF5OutputLayerTest::CheckBlobEqual( + const Blob& b1, const Blob& b2) { + EXPECT_EQ(b1.num(), b2.num()); + EXPECT_EQ(b1.channels(), b2.channels()); + EXPECT_EQ(b1.height(), b2.height()); + EXPECT_EQ(b1.width(), b2.width()); + for (int n = 0; n < b1.num(); ++n) { + for (int c = 0; c < b1.channels(); ++c) { + for (int h = 0; h < b1.height(); ++h) { + for (int w = 0; w < b1.width(); ++w) { + EXPECT_EQ(b1.data_at(n, c, h, w), b1.data_at(n, c, h, w)); + } + } + } + } +} + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(HDF5OutputLayerTest, Dtypes); + +TYPED_TEST(HDF5OutputLayerTest, TestForward) { + LOG(INFO) << "Loading HDF5 file " << this->input_file_name_; + hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY, + H5P_DEFAULT); + ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" << + this->input_file_name_; + hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, + this->blob_data_); + hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, + this->blob_label_); + herr_t status = H5Fclose(file_id); + EXPECT_GE(status, 0) << "Failed to close HDF5 file " << + this->input_file_name_; + this->blob_bottom_vec_.push_back(this->blob_data_); + this->blob_bottom_vec_.push_back(this->blob_label_); + + Caffe::Brew modes[] = { Caffe::CPU, Caffe::GPU }; + for (int m = 0; m < 2; ++m) { + Caffe::set_mode(modes[m]); + LayerParameter param; + param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_); + // This code block ensures that the layer is deconstructed and + // the output hdf5 file is closed. 
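+    // Destroying the layer at the end of this scope closes the output file;
+    // the checks below then reopen it and expect to find the two bottom
+    // blobs stored under HDF5_DATA_DATASET_NAME and HDF5_DATA_LABEL_NAME.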
+ { + HDF5OutputLayer layer(param); + EXPECT_EQ(layer.file_name(), this->output_file_name_); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + } + hid_t file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY, + H5P_DEFAULT); + ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" << + this->input_file_name_; + + Blob* blob_data = new Blob(); + hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, + blob_data); + this->CheckBlobEqual(*(this->blob_data_), *blob_data); + + Blob* blob_label = new Blob(); + hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, + blob_label); + this->CheckBlobEqual(*(this->blob_label_), *blob_label); + + herr_t status = H5Fclose(file_id); + EXPECT_GE(status, 0) << "Failed to close HDF5 file " << + this->output_file_name_; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5data_layer.cpp new file mode 100644 index 000000000..a0ed113b3 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hdf5data_layer.cpp @@ -0,0 +1,132 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "cuda_runtime.h" +#include "leveldb/db.h" + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/test/test_caffe_main.hpp" + +using std::string; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class HDF5DataLayerTest : public ::testing::Test { + protected: + HDF5DataLayerTest() + : blob_top_data_(new Blob()), + blob_top_label_(new Blob()), + filename(NULL) {} + virtual void SetUp() { + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + + // Check out generate_sample_data.py in the same directory. + filename = new string("src/caffe/test/test_data/sample_data_list.txt"); + LOG(INFO) << "Using sample HDF5 data file " << filename; + } + + virtual ~HDF5DataLayerTest() { + delete blob_top_data_; + delete blob_top_label_; + delete filename; + } + + string* filename; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(HDF5DataLayerTest, Dtypes); + +TYPED_TEST(HDF5DataLayerTest, TestRead) { + // Create LayerParameter with the known parameters. + // The data file we are reading has 10 rows and 8 columns, + // with values from 0 to 10*8 reshaped in row-major order. + LayerParameter param; + HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param(); + int batch_size = 5; + hdf5_data_param->set_batch_size(batch_size); + hdf5_data_param->set_source(*(this->filename)); + int num_rows = 10; + int num_cols = 8; + int height = 5; + int width = 5; + + // Test that the layer setup got the correct parameters. 
+ HDF5DataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), batch_size); + EXPECT_EQ(this->blob_top_data_->channels(), num_cols); + EXPECT_EQ(this->blob_top_data_->height(), height); + EXPECT_EQ(this->blob_top_data_->width(), width); + + EXPECT_EQ(this->blob_top_label_->num(), batch_size); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + + for (int t = 0; t < 2; ++t) { + // TODO: make this a TypedTest instead of this silly loop. + if (t == 0) { + Caffe::set_mode(Caffe::CPU); + } else { + Caffe::set_mode(Caffe::GPU); + } + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + + // Go through the data 10 times (5 batches). + const int data_size = num_cols * height * width; + for (int iter = 0; iter < 10; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + + // On even iterations, we're reading the first half of the data. + // On odd iterations, we're reading the second half of the data. + int label_offset = (iter % 2 == 0) ? 0 : batch_size; + int data_offset = (iter % 2 == 0) ? 0 : batch_size * data_size; + + // Every two iterations we are reading the second file, + // which has the same labels, but data is offset by total data size, + // which is 2000 (see generate_sample_data). + int file_offset = (iter % 4 < 2) ? 0 : 2000; + + for (int i = 0; i < batch_size; ++i) { + EXPECT_EQ( + label_offset + i, + this->blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < batch_size; ++i) { + for (int j = 0; j < num_cols; ++j) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + int idx = ( + i * num_cols * height * width + + j * height * width + + h * width + w); + EXPECT_EQ( + file_offset + data_offset + idx, + this->blob_top_data_->cpu_data()[idx]) + << "debug: i " << i << " j " << j + << " iter " << iter << " t " << t; + } + } + } + } + } + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hinge_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hinge_loss_layer.cpp new file mode 100644 index 000000000..318030b67 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_hinge_loss_layer.cpp @@ -0,0 +1,101 @@ +// Copyright 2014 BVLC and contributors. 
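// ----------------------------------------------------------------------------
// Note (editorial, not part of the original patch): the five-argument checker
// used in the hinge-loss tests below follows the constructor in
// test_gradient_check_util.hpp,
//
//   GradientChecker<Dtype> checker(stepsize, threshold, seed, kink, kink_range);
//
// where `kink` and `kink_range` tell the checker to skip features whose
// absolute value lies within `kink_range` of `kink`, i.e. points where a
// piecewise-linear loss such as the L1 hinge is not differentiable (the margin
// at 1 in this case).
// ----------------------------------------------------------------------------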
+ +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class HingeLossLayerTest : public ::testing::Test { + protected: + HingeLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)) { + // fill the values + FillerParameter filler_param; + filler_param.set_std(10); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + } + virtual ~HingeLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(HingeLossLayerTest, Dtypes); + + +TYPED_TEST(HingeLossLayerTest, TestGradientL1CPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + HingeLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-3, 1701, 1, 0.01); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +TYPED_TEST(HingeLossLayerTest, TestGradientL1GPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + HingeLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-3, 1701, 1, 0.01); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + + +TYPED_TEST(HingeLossLayerTest, TestGradientL2CPU) { + LayerParameter layer_param; + // Set norm to L2 + HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param(); + hinge_loss_param->set_norm(HingeLossParameter_Norm_L2); + Caffe::set_mode(Caffe::CPU); + HingeLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 2e-3, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + + +TYPED_TEST(HingeLossLayerTest, TestGradientL2GPU) { + LayerParameter layer_param; + // Set norm to L2 + HingeLossParameter* hinge_loss_param = layer_param.mutable_hinge_loss_param(); + hinge_loss_param->set_norm(HingeLossParameter_Norm_L2); + Caffe::set_mode(Caffe::GPU); + HingeLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 2e-3, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_im2col_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_im2col_layer.cpp new file mode 100644 index 000000000..7f677ca03 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_im2col_layer.cpp @@ -0,0 +1,118 @@ +// Copyright 2014 BVLC and contributors. 
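// ----------------------------------------------------------------------------
// Note (editorial, not part of the original patch): the shape expectations in
// the im2col tests below follow from the layer parameters. With a 2x3x6x5
// bottom blob, kernel_size 3 and stride 2:
//
//   output channels = channels * kernel_size * kernel_size = 3 * 3 * 3 = 27
//   output height   = (6 - 3) / 2 + 1 = 2
//   output width    = (5 - 3) / 2 + 1 = 2
//
// which matches the EXPECT_EQ checks of 27, 2 and 2 in TestSetup.
// ----------------------------------------------------------------------------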
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class Im2colLayerTest : public ::testing::Test { + protected: + Im2colLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~Im2colLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(Im2colLayerTest, Dtypes); + +TYPED_TEST(Im2colLayerTest, TestSetup) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 27); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(Im2colLayerTest, TestCPU) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // We are lazy and will only check the top left block + for (int c = 0; c < 27; ++c) { + EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), + this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3)); + } +} + +TYPED_TEST(Im2colLayerTest, TestGPU) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Im2colLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // We are lazy and will only check the top left block + for (int c = 0; c < 27; ++c) { + EXPECT_EQ(this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3), + this->blob_top_->data_at(0, c, 0, 0)); + } +} + +TYPED_TEST(Im2colLayerTest, TestCPUGradient) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Caffe::set_mode(Caffe::CPU); + Im2colLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(Im2colLayerTest, TestGPUGradient) { + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); + Caffe::set_mode(Caffe::GPU); + Im2colLayer layer(layer_param); + 
GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_image_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_image_data_layer.cpp new file mode 100644 index 000000000..b823f6c4b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_image_data_layer.cpp @@ -0,0 +1,292 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) +#include +#include +#include + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/test/test_caffe_main.hpp" + +using std::map; +using std::string; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ImageDataLayerTest : public ::testing::Test { + protected: + ImageDataLayerTest() + : blob_top_data_(new Blob()), + blob_top_label_(new Blob()), + filename_(new string(tmpnam(NULL))), + seed_(1701) {} + virtual void SetUp() { + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + Caffe::set_random_seed(seed_); + // Create a Vector of files with labels + std::ofstream outfile(filename_->c_str(), std::ofstream::out); + LOG(INFO) << "Using temporary file " << *filename_; + for (int i = 0; i < 5; ++i) { + outfile << "examples/images/cat.jpg " << i; + labels_.push_back(i); + } + outfile.close(); + image_ = cv::imread("examples/images/cat.jpg", CV_LOAD_IMAGE_COLOR); + } + + virtual ~ImageDataLayerTest() { + delete blob_top_data_; + delete blob_top_label_; + } + + int seed_; + shared_ptr filename_; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + cv::Mat image_; + vector labels_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ImageDataLayerTest, Dtypes); + +TYPED_TEST(ImageDataLayerTest, TestRead) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_->c_str()); + image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + cv::Mat image = this->image_; + // Go through the data 5 times + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + const TypeParam* data = this->blob_top_data_->cpu_data(); + for (int i = 0, index = 0; i < 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + for (int c = 0; c < 3; ++c) { + for (int h = 0; h < image.rows; ++h) { + for (int w = 0; w < image.cols; ++w) { + EXPECT_EQ(static_cast(image.at(h, w)[c]), + static_cast(data[index++])); + } + } + } + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestResize) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + 
image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_->c_str()); + image_data_param->set_new_height(256); + image_data_param->set_new_width(256); + image_data_param->set_shuffle(false); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 256); + EXPECT_EQ(this->blob_top_data_->width(), 256); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + // Go through the data twice + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestShuffle) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_source(this->filename_->c_str()); + image_data_param->set_shuffle(true); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), 360); + EXPECT_EQ(this->blob_top_data_->width(), 480); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + cv::Mat image = this->image_; + // Go through the data twice + for (int iter = 0; iter < 2; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + map values_to_indices; + int num_in_order = 0; + for (int i = 0; i < 5; ++i) { + TypeParam value = this->blob_top_label_->cpu_data()[i]; + // Check that the value has not been seen already (no duplicates). 
+ EXPECT_EQ(values_to_indices.find(value), values_to_indices.end()); + values_to_indices[value] = i; + num_in_order += (value == TypeParam(i)); + } + EXPECT_EQ(5, values_to_indices.size()); + EXPECT_GT(5, num_in_order); + const TypeParam* data = this->blob_top_data_->cpu_data(); + for (int i = 0, index = 0; i < 5; ++i) { + EXPECT_GE(this->blob_top_label_->cpu_data()[i], 0); + EXPECT_LE(this->blob_top_label_->cpu_data()[i], 5); + for (int c = 0; c < 3; ++c) { + for (int h = 0; h < image.rows; ++h) { + for (int w = 0; w < image.cols; ++w) { + EXPECT_EQ(static_cast(image.at(h, w)[c]), + data[index++]); + } + } + } + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestAddImagesAndLabels) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_shuffle(true); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 0); + EXPECT_EQ(this->blob_top_data_->channels(), 0); + EXPECT_EQ(this->blob_top_data_->height(), 0); + EXPECT_EQ(this->blob_top_data_->width(), 0); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + cv::Mat image = this->image_; + vector images(5, image); + layer.AddImagesAndLabels(images, this->labels_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), this->image_.rows); + EXPECT_EQ(this->blob_top_data_->width(), this->image_.cols); + // Go through the data 5 times + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + const TypeParam* data = this->blob_top_data_->cpu_data(); + for (int i = 0, index = 0; i < 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + for (int c = 0; c < 3; ++c) { + for (int h = 0; h < image.rows; ++h) { + for (int w = 0; w < image.cols; ++w) { + EXPECT_EQ(static_cast(image.at(h, w)[c]), + data[index++]); + } + } + } + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestAddImagesAndLabelsResize) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_shuffle(false); + image_data_param->set_new_height(256); + image_data_param->set_new_width(256); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 0); + EXPECT_EQ(this->blob_top_data_->channels(), 0); + EXPECT_EQ(this->blob_top_data_->height(), 0); + EXPECT_EQ(this->blob_top_data_->width(), 0); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + cv::Mat image = this->image_; + vector images(5, image); + layer.AddImagesAndLabels(images, this->labels_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), image_data_param->new_height()); + EXPECT_EQ(this->blob_top_data_->width(), image_data_param->new_width()); + // Go through the data 50 times + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + const TypeParam* data = this->blob_top_data_->cpu_data(); + for (int i = 0, index = 0; i 
< 5; ++i) { + EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); + } + } +} + +TYPED_TEST(ImageDataLayerTest, TestAddImagesAndLabelsShuffle) { + LayerParameter param; + ImageDataParameter* image_data_param = param.mutable_image_data_param(); + image_data_param->set_batch_size(5); + image_data_param->set_shuffle(true); + ImageDataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + EXPECT_EQ(this->blob_top_data_->num(), 0); + EXPECT_EQ(this->blob_top_data_->channels(), 0); + EXPECT_EQ(this->blob_top_data_->height(), 0); + EXPECT_EQ(this->blob_top_data_->width(), 0); + EXPECT_EQ(this->blob_top_label_->num(), 5); + EXPECT_EQ(this->blob_top_label_->channels(), 1); + EXPECT_EQ(this->blob_top_label_->height(), 1); + EXPECT_EQ(this->blob_top_label_->width(), 1); + cv::Mat image = this->image_; + vector images(5, image); + layer.AddImagesAndLabels(images, this->labels_); + EXPECT_EQ(this->blob_top_data_->num(), 5); + EXPECT_EQ(this->blob_top_data_->channels(), 3); + EXPECT_EQ(this->blob_top_data_->height(), this->image_.rows); + EXPECT_EQ(this->blob_top_data_->width(), this->image_.cols); + // Go through the data 5 times + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + const TypeParam* data = this->blob_top_data_->cpu_data(); + for (int i = 0, index = 0; i < 5; ++i) { + EXPECT_GE(this->blob_top_label_->cpu_data()[i], 0); + EXPECT_LE(this->blob_top_label_->cpu_data()[i], 5); + for (int c = 0; c < 3; ++c) { + for (int h = 0; h < image.rows; ++h) { + for (int w = 0; w < image.cols; ++w) { + EXPECT_EQ(static_cast(image.at(h, w)[c]), + data[index++]); + } + } + } + } + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_inner_product_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_inner_product_layer.cpp new file mode 100644 index 000000000..91917df6c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_inner_product_layer.cpp @@ -0,0 +1,137 @@ +// Copyright 2014 BVLC and contributors. 
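// ----------------------------------------------------------------------------
// Note (editorial, not part of the original patch): several GPU tests below
// are wrapped in
//
//   if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { ... }
//
// so they always run for float (4 bytes) but, for double, only on devices of
// compute capability 2.0 or newer; otherwise the test logs "Skipping test due
// to old architecture." instead of failing.
// ----------------------------------------------------------------------------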
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class InnerProductLayerTest : public ::testing::Test { + protected: + InnerProductLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~InnerProductLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(InnerProductLayerTest, Dtypes); + +TYPED_TEST(InnerProductLayerTest, TestSetUp) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_->channels(), 10); +} + +TYPED_TEST(InnerProductLayerTest, TestCPU) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + Caffe::set_mode(Caffe::CPU); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } +} + +TYPED_TEST(InnerProductLayerTest, TestGPU) { + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + Caffe::set_mode(Caffe::GPU); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +TYPED_TEST(InnerProductLayerTest, TestCPUGradient) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + 
layer_param.mutable_inner_product_param(); + Caffe::set_mode(Caffe::CPU); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + InnerProductLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(InnerProductLayerTest, TestGPUGradient) { + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + Caffe::set_mode(Caffe::GPU); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + InnerProductLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_lrn_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_lrn_layer.cpp new file mode 100644 index 000000000..1923128dd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_lrn_layer.cpp @@ -0,0 +1,282 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::min; +using std::max; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class LRNLayerTest : public ::testing::Test { + protected: + LRNLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + epsilon_(Dtype(1e-5)) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 7, 3, 3); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~LRNLayerTest() { delete blob_bottom_; delete blob_top_; } + void ReferenceLRNForward(const Blob& blob_bottom, + const LayerParameter& layer_param, Blob* blob_top); + + Dtype epsilon_; + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +template +void LRNLayerTest::ReferenceLRNForward( + const Blob& blob_bottom, const LayerParameter& layer_param, + Blob* blob_top) { + blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), + blob_bottom.height(), blob_bottom.width()); + const Dtype* bottom_data = blob_bottom.cpu_data(); + Dtype* top_data = blob_top->mutable_cpu_data(); + LRNParameter lrn_param = layer_param.lrn_param(); + Dtype alpha = lrn_param.alpha(); + Dtype beta = lrn_param.beta(); + int size = lrn_param.local_size(); + switch (lrn_param.norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + 
for (int w = 0; w < blob_bottom.width(); ++w) { + int c_start = c - (size - 1) / 2; + int c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0); + Dtype scale = 1.; + for (int i = c_start; i < c_end; ++i) { + Dtype value = blob_bottom.data_at(n, i, h, w); + scale += value * value * alpha / size; + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + int h_start = h - (size - 1) / 2; + int h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0); + for (int w = 0; w < blob_bottom.width(); ++w) { + Dtype scale = 1.; + int w_start = w - (size - 1) / 2; + int w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0); + for (int nh = h_start; nh < h_end; ++nh) { + for (int nw = w_start; nw < w_end; ++nw) { + Dtype value = blob_bottom.data_at(n, c, nh, nw); + scale += value * value * alpha / (size * size); + } + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(LRNLayerTest, Dtypes); + +TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) { + LayerParameter layer_param; + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 7); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(LRNLayerTest, TestCPUForwardAcrossChannels) { + LayerParameter layer_param; + LRNLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestGPUForwardAcrossChannels) { + LayerParameter layer_param; + LRNLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestCPUGradientAcrossChannels) { + LayerParameter layer_param; + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + 
checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(LRNLayerTest, TestGPUGradientAcrossChannels) { + LayerParameter layer_param; + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 7); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(LRNLayerTest, TestCPUForwardWithinChannel) { + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestGPUForwardWithinChannel) { + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(LRNLayerTest, TestCPUGradientWithinChannel) { + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(LRNLayerTest, TestGPUGradientWithinChannel) { + LayerParameter 
layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + LRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_math_functions.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_math_functions.cpp new file mode 100644 index 000000000..d0265767c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_math_functions.cpp @@ -0,0 +1,230 @@ +// Copyright 2014 BVLC and contributors. + +#include // for uint32_t & uint64_t +#include +#include +#include // for std::fabs +#include // for rand_r + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MathFunctionsTest : public ::testing::Test { + protected: + MathFunctionsTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) { + } + + virtual void SetUp() { + Caffe::set_random_seed(1701); + this->blob_bottom_->Reshape(11, 17, 19, 23); + this->blob_top_->Reshape(11, 17, 19, 23); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_top_); + } + + virtual ~MathFunctionsTest() { + delete blob_bottom_; + delete blob_top_; + } + + // http://en.wikipedia.org/wiki/Hamming_distance + int ReferenceHammingDistance(const int n, const Dtype* x, const Dtype* y) { + int dist = 0; + uint64_t val; + for (int i = 0; i < n; ++i) { + if (sizeof(Dtype) == 8) { + val = static_cast(x[i]) ^ static_cast(y[i]); + } else if (sizeof(Dtype) == 4) { + val = static_cast(x[i]) ^ static_cast(y[i]); + } else { + LOG(FATAL) << "Unrecognized Dtype size: " << sizeof(Dtype); + } + // Count the number of set bits + while (val) { + ++dist; + val &= val - 1; + } + } + return dist; + } + + Blob* const blob_bottom_; + Blob* const blob_top_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(MathFunctionsTest, Dtypes); + +TYPED_TEST(MathFunctionsTest, TestNothing) { + // The first test case of a test suite takes the longest time + // due to the set up overhead. +} + +TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + const TypeParam* y = this->blob_top_->cpu_data(); + EXPECT_EQ(this->ReferenceHammingDistance(n, x, y), + caffe_cpu_hamming_distance(n, x, y)); +} + +// TODO: Fix caffe_gpu_hamming_distance and re-enable this test. 
+TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + const TypeParam* y = this->blob_top_->cpu_data(); + int reference_distance = this->ReferenceHammingDistance(n, x, y); + x = this->blob_bottom_->gpu_data(); + y = this->blob_top_->gpu_data(); + int computed_distance = caffe_gpu_hamming_distance(n, x, y); + EXPECT_EQ(reference_distance, computed_distance); +} + +TYPED_TEST(MathFunctionsTest, TestAsumCPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + TypeParam std_asum = 0; + for (int i = 0; i < n; ++i) { + std_asum += std::fabs(x[i]); + } + TypeParam cpu_asum = caffe_cpu_asum(n, x); + EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); +} + +TYPED_TEST(MathFunctionsTest, TestAsumGPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + TypeParam std_asum = 0; + for (int i = 0; i < n; ++i) { + std_asum += std::fabs(x[i]); + } + TypeParam gpu_asum; + caffe_gpu_asum(n, this->blob_bottom_->gpu_data(), &gpu_asum); + EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); +} + +TYPED_TEST(MathFunctionsTest, TestSignCPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* signs = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); + } +} + +TYPED_TEST(MathFunctionsTest, TestSignGPU) { + int n = this->blob_bottom_->count(); + caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* signs = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signs[i], x[i] > 0 ? 1 : (x[i] < 0 ? -1 : 0)); + } +} + +TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* signbits = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); + } +} + +TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { + int n = this->blob_bottom_->count(); + caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* signbits = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(signbits[i], x[i] < 0 ? 1 : 0); + } +} + +TYPED_TEST(MathFunctionsTest, TestFabsCPU) { + int n = this->blob_bottom_->count(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + caffe_cpu_fabs(n, x, this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(abs_val[i], x[i] > 0 ? x[i] : -x[i]); + } +} + +TYPED_TEST(MathFunctionsTest, TestFabsGPU) { + int n = this->blob_bottom_->count(); + caffe_gpu_fabs(n, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* abs_val = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(abs_val[i], x[i] > 0 ? 
x[i] : -x[i]); + } +} + +TYPED_TEST(MathFunctionsTest, TestScaleCPU) { + int n = this->blob_bottom_->count(); + TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % + this->blob_bottom_->count()]; + caffe_cpu_scale(n, alpha, this->blob_bottom_->cpu_data(), + this->blob_bottom_->mutable_cpu_diff()); + const TypeParam* scaled = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(scaled[i], x[i] * alpha); + } +} + +TYPED_TEST(MathFunctionsTest, TestScaleGPU) { + int n = this->blob_bottom_->count(); + TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % + this->blob_bottom_->count()]; + caffe_gpu_scale(n, alpha, this->blob_bottom_->gpu_data(), + this->blob_bottom_->mutable_gpu_diff()); + const TypeParam* scaled = this->blob_bottom_->cpu_diff(); + const TypeParam* x = this->blob_bottom_->cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(scaled[i], x[i] * alpha); + } +} + +TYPED_TEST(MathFunctionsTest, TestCopyCPU) { + const int n = this->blob_bottom_->count(); + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + TypeParam* top_data = this->blob_top_->mutable_cpu_data(); + caffe_copy(n, bottom_data, top_data); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(bottom_data[i], top_data[i]); + } +} + +TYPED_TEST(MathFunctionsTest, TestCopyGPU) { + const int n = this->blob_bottom_->count(); + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + caffe_gpu_copy(n, bottom_data, top_data); + bottom_data = this->blob_bottom_->cpu_data(); + top_data = this->blob_top_->mutable_cpu_data(); + for (int i = 0; i < n; ++i) { + EXPECT_EQ(bottom_data[i], top_data[i]); + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_maxpool_dropout_layers.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_maxpool_dropout_layers.cpp new file mode 100644 index 000000000..3862e1269 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -0,0 +1,188 @@ +// Copyright 2014 BVLC and contributors. 
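// ----------------------------------------------------------------------------
// Note (editorial, not part of the original patch): the bounds checked in the
// max-pooling + dropout forward tests below come from inverted dropout. With
// an all-ones input, the max-pooled sum equals blob_top_->count(); after
// dropout with ratio p, the surviving activations are scaled by 1 / (1 - p),
// so
//
//   0 <= sum <= count() * 1 / (1 - dropout_ratio)
//
// which is exactly the EXPECT_GE / EXPECT_LE pair used in CPUForward and
// GPUForward.
// ----------------------------------------------------------------------------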
+ +#include +#include + +#include "cuda_runtime.h" + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MaxPoolingDropoutTest : public ::testing::Test { + protected: + MaxPoolingDropoutTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1703); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MaxPoolingDropoutTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(MaxPoolingDropoutTest, Dtypes); + +TYPED_TEST(MaxPoolingDropoutTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer max_layer(layer_param); + max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + + +TYPED_TEST(MaxPoolingDropoutTest, CPUForward) { + Caffe::set_mode(Caffe::CPU); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* top_data = this->blob_top_->cpu_data(); + TypeParam sum = 0.; + for (int i = 0; i < this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + // Dropout in-place + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + sum = 0.; + TypeParam scale = 1. / (1. 
- layer_param.dropout_param().dropout_ratio()); + top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_GE(sum, 0); + EXPECT_LE(sum, this->blob_top_->count()*scale); +} + +TYPED_TEST(MaxPoolingDropoutTest, GPUForward) { + Caffe::set_mode(Caffe::GPU); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* top_data = this->blob_top_->cpu_data(); + TypeParam sum = 0.; + for (int i = 0; i < this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + sum = 0.; + TypeParam scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); + top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + sum += top_data[i]; + } + EXPECT_GE(sum, 0); + EXPECT_LE(sum, this->blob_top_->count()*scale); +} + +TYPED_TEST(MaxPoolingDropoutTest, CPUBackward) { + Caffe::set_mode(Caffe::CPU); + Caffe::set_phase(Caffe::TRAIN); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff(); + TypeParam sum = 0.; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum += bottom_diff[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + // Dropout in-place + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Backward(this->blob_top_vec_, true, &(this->blob_top_vec_)); + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + TypeParam sum_with_dropout = 0.; + bottom_diff = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum_with_dropout += bottom_diff[i]; + } + EXPECT_GE(sum_with_dropout, sum); +} + +TYPED_TEST(MaxPoolingDropoutTest, GPUBackward) { + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TRAIN); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + const TypeParam* bottom_diff = this->blob_bottom_->cpu_diff(); + TypeParam sum = 0.; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum += 
bottom_diff[i]; + } + EXPECT_EQ(sum, this->blob_top_->count()); + // Dropout in-place + DropoutLayer dropout_layer(layer_param); + dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.Backward(this->blob_top_vec_, true, &(this->blob_top_vec_)); + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + TypeParam sum_with_dropout = 0.; + bottom_diff = this->blob_bottom_->cpu_diff(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + sum_with_dropout += bottom_diff[i]; + } + EXPECT_GE(sum_with_dropout, sum); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_memory_data_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_memory_data_layer.cpp new file mode 100644 index 000000000..15f01bd41 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_memory_data_layer.cpp @@ -0,0 +1,108 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class MemoryDataLayerTest : public ::testing::Test { + protected: + MemoryDataLayerTest() + : data_blob_(new Blob()), + label_blob_(new Blob()), + data_(new Blob()), labels_(new Blob()) {} + virtual void SetUp() { + batch_size_ = 8; + batches_ = 12; + channels_ = 4; + height_ = 7; + width_ = 11; + blob_top_vec_.push_back(data_blob_); + blob_top_vec_.push_back(label_blob_); + // pick random input data + FillerParameter filler_param; + GaussianFiller filler(filler_param); + data_->Reshape(batches_ * batch_size_, channels_, height_, width_); + labels_->Reshape(batches_ * batch_size_, 1, 1, 1); + filler.Fill(this->data_); + filler.Fill(this->labels_); + } + + virtual ~MemoryDataLayerTest() { + delete data_blob_; + delete label_blob_; + delete data_; + delete labels_; + } + int batch_size_; + int batches_; + int channels_; + int height_; + int width_; + // we don't really need blobs for the input data, but it makes it + // easier to call Filler + Blob* const data_; + Blob* const labels_; + // blobs for the top of MemoryDataLayer + Blob* const data_blob_; + Blob* const label_blob_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(MemoryDataLayerTest, Dtypes); + +TYPED_TEST(MemoryDataLayerTest, TestSetup) { + LayerParameter layer_param; + MemoryDataParameter* md_param = layer_param.mutable_memory_data_param(); + md_param->set_batch_size(this->batch_size_); + md_param->set_channels(this->channels_); + md_param->set_height(this->height_); + md_param->set_width(this->width_); + shared_ptr > layer( + new MemoryDataLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->data_blob_->num(), this->batch_size_); + EXPECT_EQ(this->data_blob_->channels(), this->channels_); + EXPECT_EQ(this->data_blob_->height(), this->height_); + EXPECT_EQ(this->data_blob_->width(), this->width_); + EXPECT_EQ(this->label_blob_->num(), this->batch_size_); + EXPECT_EQ(this->label_blob_->channels(), 1); + EXPECT_EQ(this->label_blob_->height(), 1); + EXPECT_EQ(this->label_blob_->width(), 1); +} + +// run through a few batches and check that the right data appears +TYPED_TEST(MemoryDataLayerTest, TestForward) { + LayerParameter layer_param; + MemoryDataParameter* md_param = layer_param.mutable_memory_data_param(); + 
md_param->set_batch_size(this->batch_size_); + md_param->set_channels(this->channels_); + md_param->set_height(this->height_); + md_param->set_width(this->width_); + shared_ptr > layer( + new MemoryDataLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Reset(this->data_->mutable_cpu_data(), + this->labels_->mutable_cpu_data(), this->data_->num()); + for (int i = 0; i < this->batches_ * 6; ++i) { + int batch_num = i % this->batches_; + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int j = 0; j < this->data_blob_->count(); ++j) { + EXPECT_EQ(this->data_blob_->cpu_data()[j], + this->data_->cpu_data()[ + this->data_->offset(1) * this->batch_size_ * batch_num + j]); + } + for (int j = 0; j < this->label_blob_->count(); ++j) { + EXPECT_EQ(this->label_blob_->cpu_data()[j], + this->labels_->cpu_data()[this->batch_size_ * batch_num + j]); + } + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_multinomial_logistic_loss_layer.cpp new file mode 100644 index 000000000..aa475ca27 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -0,0 +1,63 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class MultinomialLogisticLossLayerTest : public ::testing::Test { + protected: + MultinomialLogisticLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + PositiveUnitballFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + } + virtual ~MultinomialLogisticLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, Dtypes); + + +TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + MultinomialLogisticLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_net.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_net.cpp new file mode 100644 index 000000000..1e43a0384 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_net.cpp @@ -0,0 +1,107 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include +#include + +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class NetTest : public ::testing::Test { + protected: + virtual void SetUp() { + const string& proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DUMMY_DATA " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerproduct' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layers: { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'innerproduct' " + " bottom: 'label' " + "} "; + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + net_.reset(new Net(param)); + } + + shared_ptr > net_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(NetTest, Dtypes); + +TYPED_TEST(NetTest, TestHasBlob) { + EXPECT_TRUE(this->net_->has_blob("data")); + EXPECT_TRUE(this->net_->has_blob("label")); + EXPECT_TRUE(this->net_->has_blob("innerproduct")); + EXPECT_FALSE(this->net_->has_blob("loss")); +} + +TYPED_TEST(NetTest, TestGetBlob) { + EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); + EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); + EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); + EXPECT_FALSE(this->net_->blob_by_name("loss")); +} + +TYPED_TEST(NetTest, TestHasLayer) { + EXPECT_TRUE(this->net_->has_layer("data")); + EXPECT_TRUE(this->net_->has_layer("innerproduct")); + EXPECT_TRUE(this->net_->has_layer("loss")); + EXPECT_FALSE(this->net_->has_layer("label")); +} + +TYPED_TEST(NetTest, TestGetLayerByName) { + EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); + EXPECT_EQ(this->net_->layer_by_name("innerproduct"), this->net_->layers()[1]); + EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); + EXPECT_FALSE(this->net_->layer_by_name("label")); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_neuron_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_neuron_layer.cpp new file mode 100644 index 000000000..2210b4612 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_neuron_layer.cpp @@ -0,0 +1,308 @@ +// Copyright 2014 BVLC and contributors. 
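// [Editor's note: illustrative sketch, not part of the original diff.]
// The neuron-layer tests below check simple elementwise identities:
//   ReLU:    top = max(0, bottom)
//   Sigmoid: top = 1 / (1 + exp(-bottom)), squashed into (0, 1)
//   Dropout: in TRAIN phase surviving activations are rescaled by
//            1 / (1 - dropout_ratio); in TEST phase data passes through.
// A minimal reference for those formulas, assuming Caffe's default
// dropout_ratio of 0.5:
#include <algorithm>
#include <cassert>
#include <cmath>

int main() {
  const double x = -0.3;
  const double relu = std::max(0.0, x);
  const double sigmoid = 1.0 / (1.0 + std::exp(-x));
  const double dropout_ratio = 0.5;                        // proto default
  const double train_scale = 1.0 / (1.0 - dropout_ratio);  // == 2.0
  assert(relu == 0.0);
  assert(sigmoid > 0.0 && sigmoid < 1.0);
  assert(train_scale == 2.0);
  return 0;
}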
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class NeuronLayerTest : public ::testing::Test { + protected: + NeuronLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~NeuronLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(NeuronLayerTest, Dtypes); + +TYPED_TEST(NeuronLayerTest, TestReLUCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + + +TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(NeuronLayerTest, TestReLUGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + ReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + + +TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + ReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SigmoidLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_FLOAT_EQ(top_data[i], 1. 
/ (1 + exp(-bottom_data[i]))); + // check that we squashed the value between 0 and 1 + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + } +} + + +TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SigmoidLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SigmoidLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); + // check that we squashed the value between 0 and 1 + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + } +} + + +TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SigmoidLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + + +TYPED_TEST(NeuronLayerTest, TestDropoutCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + Caffe::set_phase(Caffe::TRAIN); + DropoutLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] != 0) { + EXPECT_EQ(top_data[i], bottom_data[i] * scale); + } + } +} + + +TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + Caffe::set_phase(Caffe::TRAIN); + DropoutLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + Caffe::set_phase(Caffe::TEST); + DropoutLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + float scale = 1. / (1. 
- layer_param.dropout_param().dropout_ratio()); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] != 0) { + EXPECT_EQ(top_data[i], bottom_data[i]); + } + } +} + + +TYPED_TEST(NeuronLayerTest, TestDropoutGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TRAIN); + DropoutLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + float scale = 1. / (1. - layer_param.dropout_param().dropout_ratio()); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] != 0) { + EXPECT_EQ(top_data[i], bottom_data[i] * scale); + } + } +} + + +TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPU) { + if (CAFFE_TEST_CUDA_PROP.major >= 2) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TRAIN); + DropoutLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + // it is too expensive to call curand multiple times, so we don't do an + // exhaustive gradient check. + checker.CheckGradient(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); + } else { + LOG(ERROR) << "Skipping test to spare my laptop."; + } +} + + +TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TEST); + DropoutLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + float scale = 1. / (1. 
- layer_param.dropout_param().dropout_ratio()); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (top_data[i] != 0) { + EXPECT_EQ(top_data[i], bottom_data[i]); + } + } +} + + +TYPED_TEST(NeuronLayerTest, TestBNLLCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + BNLLLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_GE(top_data[i], bottom_data[i]); + } +} + + +TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + BNLLLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(NeuronLayerTest, TestBNLLGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + BNLLLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_GE(top_data[i], bottom_data[i]); + } +} + + +TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + BNLLLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_platform.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_platform.cpp new file mode 100644 index 000000000..c3868f34d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_platform.cpp @@ -0,0 +1,53 @@ +// Copyright 2014 BVLC and contributors. 
+
+#include <cstdio>
+#include <cstring>
+
+#include "cuda_runtime.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "caffe/test/test_caffe_main.hpp"
+
+namespace caffe {
+
+extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
+
+class PlatformTest : public ::testing::Test {};
+
+TEST_F(PlatformTest, TestInitialization) {
+  printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major);
+  printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor);
+  printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name);
+  printf("Total global memory: %lu\n",
+         CAFFE_TEST_CUDA_PROP.totalGlobalMem);
+  printf("Total shared memory per block: %lu\n",
+         CAFFE_TEST_CUDA_PROP.sharedMemPerBlock);
+  printf("Total registers per block: %d\n",
+         CAFFE_TEST_CUDA_PROP.regsPerBlock);
+  printf("Warp size: %d\n",
+         CAFFE_TEST_CUDA_PROP.warpSize);
+  printf("Maximum memory pitch: %lu\n",
+         CAFFE_TEST_CUDA_PROP.memPitch);
+  printf("Maximum threads per block: %d\n",
+         CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock);
+  for (int i = 0; i < 3; ++i)
+    printf("Maximum dimension %d of block: %d\n", i,
+           CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]);
+  for (int i = 0; i < 3; ++i)
+    printf("Maximum dimension %d of grid: %d\n", i,
+           CAFFE_TEST_CUDA_PROP.maxGridSize[i]);
+  printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate);
+  printf("Total constant memory: %lu\n",
+         CAFFE_TEST_CUDA_PROP.totalConstMem);
+  printf("Texture alignment: %lu\n",
+         CAFFE_TEST_CUDA_PROP.textureAlignment);
+  printf("Concurrent copy and execution: %s\n",
+         (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No"));
+  printf("Number of multiprocessors: %d\n",
+         CAFFE_TEST_CUDA_PROP.multiProcessorCount);
+  printf("Kernel execution timeout: %s\n",
+         (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No"));
+  EXPECT_TRUE(true);
+}
+
+}  // namespace caffe
diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_pooling_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_pooling_layer.cpp
new file mode 100644
index 000000000..b13d11f6c
--- /dev/null
+++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_pooling_layer.cpp
@@ -0,0 +1,503 @@
+// Copyright 2014 BVLC and contributors.
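// [Editor's note: illustrative sketch, not part of the original diff.]
// PoolingLayerTest::TestForward below hand-codes a 3 x 5 input per channel,
//   [1 2 5 2 3]
//   [9 4 1 4 8]
//   [1 2 5 2 3]
// and expects 2x2 MAX pooling (stride 1, no padding) to yield
//   [9 5 5 8]
//   [9 5 5 8]
// A minimal reference max-pool over one channel reproduces that expectation:
#include <algorithm>
#include <cassert>

int main() {
  const int H = 3, W = 5, K = 2;  // input height/width, pooling kernel
  const double in[3][5] = {{1, 2, 5, 2, 3},
                           {9, 4, 1, 4, 8},
                           {1, 2, 5, 2, 3}};
  const double expect[2][4] = {{9, 5, 5, 8},
                               {9, 5, 5, 8}};
  for (int oh = 0; oh <= H - K; ++oh) {
    for (int ow = 0; ow <= W - K; ++ow) {
      double m = in[oh][ow];
      for (int kh = 0; kh < K; ++kh)
        for (int kw = 0; kw < K; ++kw)
          m = std::max(m, in[oh + kh][ow + kw]);
      assert(m == expect[oh][ow]);
    }
  }
  return 0;
}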
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class PoolingLayerTest : public ::testing::Test { + protected: + PoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + blob_top_mask_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~PoolingLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_mask_; + } + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_mask_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + + void TestForward() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 3, 5); + // Input: 2x 2 channels of: + // [1 2 5 2 3] + // [9 4 1 4 8] + // [1 2 5 2 3] + for (int i = 0; i < 15 * num * channels; i += 15) { + blob_bottom_->mutable_cpu_data()[i + 0] = 1; + blob_bottom_->mutable_cpu_data()[i + 1] = 2; + blob_bottom_->mutable_cpu_data()[i + 2] = 5; + blob_bottom_->mutable_cpu_data()[i + 3] = 2; + blob_bottom_->mutable_cpu_data()[i + 4] = 3; + blob_bottom_->mutable_cpu_data()[i + 5] = 9; + blob_bottom_->mutable_cpu_data()[i + 6] = 4; + blob_bottom_->mutable_cpu_data()[i + 7] = 1; + blob_bottom_->mutable_cpu_data()[i + 8] = 4; + blob_bottom_->mutable_cpu_data()[i + 9] = 8; + blob_bottom_->mutable_cpu_data()[i + 10] = 1; + blob_bottom_->mutable_cpu_data()[i + 11] = 2; + blob_bottom_->mutable_cpu_data()[i + 12] = 5; + blob_bottom_->mutable_cpu_data()[i + 13] = 2; + blob_bottom_->mutable_cpu_data()[i + 14] = 3; + } + PoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 2); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 2); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, &blob_top_vec_); + // Expected output: 2x 2 channels of: + // [9 5 5 8] + // [9 5 5 8] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8); + } + if (blob_top_vec_.size() > 1) { + // Expected mask output: 2x 2 channels of: + // [5 2 2 9] + // [5 12 12 9] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); + 
EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 9); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 9); + } + } + } +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(PoolingLayerTest, Dtypes); + +TYPED_TEST(PoolingLayerTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(PoolingLayerTest, TestSetupPadded) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +/* +TYPED_TEST(PoolingLayerTest, PrintGPUBackward) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::GPU); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +/* +TYPED_TEST(PoolingLayerTest, PrintCPUBackward) { + LayerParameter layer_param; + layer_param.set_kernelsize(3); + layer_param.set_stride(2); + layer_param.set_pool(LayerParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::CPU); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = i; + } + layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + for (int i = 0; i < 
this->blob_bottom_->count(); ++i) { + cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +TYPED_TEST(PoolingLayerTest, TestCPUForwardMax) { + Caffe::set_mode(Caffe::CPU); + this->TestForward(); +} + +TYPED_TEST(PoolingLayerTest, TestGPUForwardMax) { + Caffe::set_mode(Caffe::GPU); + this->TestForward(); +} + +TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxTopMask) { + Caffe::set_mode(Caffe::CPU); + this->blob_top_vec_.push_back(this->blob_top_mask_); + this->TestForward(); +} + +TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxTopMask) { + Caffe::set_mode(Caffe::GPU); + this->blob_top_vec_.push_back(this->blob_top_mask_); + this->TestForward(); +} + +TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::CPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::GPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(PoolingLayerTest, TestCPUForwardMaxPadded) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::CPU); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + TypeParam epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + 
EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + + +TYPED_TEST(PoolingLayerTest, TestGPUForwardMaxPadded) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + Caffe::set_mode(Caffe::GPU); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + TypeParam epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + + +TYPED_TEST(PoolingLayerTest, TestCPUGradientMaxTopMask) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(this->blob_top_mask_); + Caffe::set_mode(Caffe::CPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(PoolingLayerTest, TestGPUGradientMaxTopMask) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(this->blob_top_mask_); + Caffe::set_mode(Caffe::GPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(PoolingLayerTest, TestCPUForwardAve) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::CPU); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(TypeParam(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + 
PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + TypeParam epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon); +} + + +TYPED_TEST(PoolingLayerTest, TestGPUForwardAve) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::GPU); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(TypeParam(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + TypeParam epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon); +} + + +TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::CPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::GPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(PoolingLayerTest, TestCPUGradientAvePadded) { + LayerParameter layer_param; + PoolingParameter* pooling_param = 
layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::CPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +TYPED_TEST(PoolingLayerTest, TestGPUGradientAvePadded) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + Caffe::set_mode(Caffe::GPU); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_power_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_power_layer.cpp new file mode 100644 index 000000000..99b127d3d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_power_layer.cpp @@ -0,0 +1,256 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::isnan; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class PowerLayerTest : public ::testing::Test { + protected: + PowerLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~PowerLayerTest() { delete blob_bottom_; delete blob_top_; } + + void TestForward(Dtype power, Dtype scale, Dtype shift) { + LayerParameter layer_param; + layer_param.mutable_power_param()->set_power(power); + layer_param.mutable_power_param()->set_scale(scale); + layer_param.mutable_power_param()->set_shift(shift); + PowerLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype min_precision = 1e-5; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype expected_value = pow(shift + scale * bottom_data[i], power); + if (power == Dtype(0) || power == Dtype(1) || power == Dtype(2)) { + EXPECT_FALSE(isnan(top_data[i])); + } + if (isnan(expected_value)) { + EXPECT_TRUE(isnan(top_data[i])); + } else { + Dtype precision = max(Dtype(abs(expected_value * 0.0001)), + min_precision); + EXPECT_NEAR(expected_value, top_data[i], precision); + } + } + } + + void TestBackward(Dtype power, Dtype scale, Dtype shift) { + LayerParameter layer_param; + layer_param.mutable_power_param()->set_power(power); + layer_param.mutable_power_param()->set_scale(scale); + layer_param.mutable_power_param()->set_shift(shift); + PowerLayer layer(layer_param); + if (power != Dtype(0) && power != Dtype(1) && 
power != Dtype(2)) { + // Avoid NaNs by forcing (shift + scale * x) >= 0 + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + Dtype min_value = -shift / scale; + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + if (bottom_data[i] < min_value) { + bottom_data[i] = min_value + (min_value - bottom_data[i]); + } + } + } + GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(PowerLayerTest, Dtypes); + +TYPED_TEST(PowerLayerTest, TestPowerCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradientCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = 0.0; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZeroCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 0.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZeroGradientCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 0.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOneCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 1.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOneGradientCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 1.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 2.0; + TypeParam scale = 0.34; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoGradientCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 2.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientCPU) { + Caffe::set_mode(Caffe::CPU); + TypeParam power = 2.0; + TypeParam scale = 0.5; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradientGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerGradientShiftZeroGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 0.37; + TypeParam scale = 0.83; + TypeParam shift = 0.0; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZeroGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 0.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + 
this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerZeroGradientGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 0.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOneGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 1.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerOneGradientGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 1.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 2.0; + TypeParam scale = 0.34; + TypeParam shift = -2.4; + this->TestForward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoGradientGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 2.0; + TypeParam scale = 0.83; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +TYPED_TEST(PowerLayerTest, TestPowerTwoScaleHalfGradientGPU) { + Caffe::set_mode(Caffe::GPU); + TypeParam power = 2.0; + TypeParam scale = 0.5; + TypeParam shift = -2.4; + this->TestBackward(power, scale, shift); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_protobuf.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_protobuf.cpp new file mode 100644 index 000000000..182af2e46 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_protobuf.cpp @@ -0,0 +1,29 @@ +// Copyright 2014 BVLC and contributors. + +// This is simply a script that tries serializing protocol buffer in text +// format. Nothing special here and no actual code is being tested. +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +class ProtoTest : public ::testing::Test {}; + +TEST_F(ProtoTest, TestSerialization) { + LayerParameter param; + param.set_name("test"); + param.set_type(LayerParameter_LayerType_NONE); + std::cout << "Printing in binary format." << std::endl; + std::cout << param.SerializeAsString() << std::endl; + std::cout << "Printing in text format." << std::endl; + std::string str; + google::protobuf::TextFormat::PrintToString(param, &str); + std::cout << str << std::endl; + EXPECT_TRUE(true); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_random_number_generator.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_random_number_generator.cpp new file mode 100644 index 000000000..62daf6087 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_random_number_generator.cpp @@ -0,0 +1,519 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class RandomNumberGeneratorTest : public ::testing::Test { + protected: + RandomNumberGeneratorTest() + : sample_size_(10000), + seed_(1701), + mean_bound_multiplier_(3.8), // ~99.99% confidence for test failure. 
+ data_(new SyncedMemory(sample_size_ * sizeof(Dtype))), + data_2_(new SyncedMemory(sample_size_ * sizeof(Dtype))), + int_data_(new SyncedMemory(sample_size_ * sizeof(int))), + int_data_2_(new SyncedMemory(sample_size_ * sizeof(int))) {} + + virtual void SetUp() { + Caffe::set_random_seed(this->seed_); + } + + Dtype sample_mean(const Dtype* const seqs, const int sample_size) { + Dtype sum = 0; + for (int i = 0; i < sample_size; ++i) { + sum += seqs[i]; + } + return sum / sample_size; + } + + Dtype sample_mean(const Dtype* const seqs) { + return sample_mean(seqs, sample_size_); + } + + Dtype sample_mean(const int* const seqs, const int sample_size) { + Dtype sum = 0; + for (int i = 0; i < sample_size; ++i) { + sum += Dtype(seqs[i]); + } + return sum / sample_size; + } + + Dtype sample_mean(const int* const seqs) { + return sample_mean(seqs, sample_size_); + } + + Dtype mean_bound(const Dtype std, const int sample_size) { + return mean_bound_multiplier_ * std / sqrt(static_cast(sample_size)); + } + + Dtype mean_bound(const Dtype std) { + return mean_bound(std, sample_size_); + } + + void RngGaussianFill(const Dtype mu, const Dtype sigma, void* cpu_data) { + Dtype* rng_data = static_cast(cpu_data); + caffe_rng_gaussian(sample_size_, mu, sigma, rng_data); + } + + void RngGaussianFillGPU(const Dtype mu, const Dtype sigma, void* gpu_data) { + Dtype* rng_data = static_cast(gpu_data); + caffe_gpu_rng_gaussian(sample_size_, mu, sigma, rng_data); + } + + void RngGaussianChecks(const Dtype mu, const Dtype sigma, + const void* cpu_data, const Dtype sparse_p = 0) { + const Dtype* rng_data = static_cast(cpu_data); + const Dtype true_mean = mu; + const Dtype true_std = sigma; + // Check that sample mean roughly matches true mean. + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean( + static_cast(cpu_data)); + EXPECT_NEAR(sample_mean, true_mean, bound); + // Check that roughly half the samples are above the true mean. + int num_above_mean = 0; + int num_below_mean = 0; + int num_mean = 0; + int num_nan = 0; + for (int i = 0; i < sample_size_; ++i) { + if (rng_data[i] > true_mean) { + ++num_above_mean; + } else if (rng_data[i] < true_mean) { + ++num_below_mean; + } else if (rng_data[i] == true_mean) { + ++num_mean; + } else { + ++num_nan; + } + } + EXPECT_EQ(0, num_nan); + if (sparse_p == Dtype(0)) { + EXPECT_EQ(0, num_mean); + } + const Dtype sample_p_above_mean = + static_cast(num_above_mean) / sample_size_; + const Dtype bernoulli_p = (1 - sparse_p) * 0.5; + const Dtype bernoulli_std = sqrt(bernoulli_p * (1 - bernoulli_p)); + const Dtype bernoulli_bound = this->mean_bound(bernoulli_std); + EXPECT_NEAR(bernoulli_p, sample_p_above_mean, bernoulli_bound); + } + + void RngUniformFill(const Dtype lower, const Dtype upper, void* cpu_data) { + CHECK_GE(upper, lower); + Dtype* rng_data = static_cast(cpu_data); + caffe_rng_uniform(sample_size_, lower, upper, rng_data); + } + + void RngUniformFillGPU(const Dtype lower, const Dtype upper, void* gpu_data) { + CHECK_GE(upper, lower); + Dtype* rng_data = static_cast(gpu_data); + caffe_gpu_rng_uniform(sample_size_, lower, upper, rng_data); + } + + // Fills with uniform integers in [0, UINT_MAX] using 2 argument form of + // caffe_gpu_rng_uniform. 
+ void RngUniformIntFillGPU(void* gpu_data) { + unsigned int* rng_data = static_cast(gpu_data); + caffe_gpu_rng_uniform(sample_size_, rng_data); + } + + void RngUniformChecks(const Dtype lower, const Dtype upper, + const void* cpu_data, const Dtype sparse_p = 0) { + const Dtype* rng_data = static_cast(cpu_data); + const Dtype true_mean = (lower + upper) / 2; + const Dtype true_std = (upper - lower) / sqrt(12); + // Check that sample mean roughly matches true mean. + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean(rng_data); + EXPECT_NEAR(sample_mean, true_mean, bound); + // Check that roughly half the samples are above the true mean, and none are + // above upper or below lower. + int num_above_mean = 0; + int num_below_mean = 0; + int num_mean = 0; + int num_nan = 0; + int num_above_upper = 0; + int num_below_lower = 0; + for (int i = 0; i < sample_size_; ++i) { + if (rng_data[i] > true_mean) { + ++num_above_mean; + } else if (rng_data[i] < true_mean) { + ++num_below_mean; + } else if (rng_data[i] == true_mean) { + ++num_mean; + } else { + ++num_nan; + } + if (rng_data[i] > upper) { + ++num_above_upper; + } else if (rng_data[i] < lower) { + ++num_below_lower; + } + } + EXPECT_EQ(0, num_nan); + EXPECT_EQ(0, num_above_upper); + EXPECT_EQ(0, num_below_lower); + if (sparse_p == Dtype(0)) { + EXPECT_EQ(0, num_mean); + } + const Dtype sample_p_above_mean = + static_cast(num_above_mean) / sample_size_; + const Dtype bernoulli_p = (1 - sparse_p) * 0.5; + const Dtype bernoulli_std = sqrt(bernoulli_p * (1 - bernoulli_p)); + const Dtype bernoulli_bound = this->mean_bound(bernoulli_std); + EXPECT_NEAR(bernoulli_p, sample_p_above_mean, bernoulli_bound); + } + + void RngBernoulliFill(const Dtype p, void* cpu_data) { + int* rng_data = static_cast(cpu_data); + caffe_rng_bernoulli(sample_size_, p, rng_data); + } + + void RngBernoulliChecks(const Dtype p, const void* cpu_data) { + const int* rng_data = static_cast(cpu_data); + const Dtype true_mean = p; + const Dtype true_std = sqrt(p * (1 - p)); + const Dtype bound = this->mean_bound(true_std); + const Dtype sample_mean = this->sample_mean(rng_data); + EXPECT_NEAR(sample_mean, true_mean, bound); + } + + int num_above_mean; + int num_below_mean; + + Dtype mean_bound_multiplier_; + + size_t sample_size_; + uint32_t seed_; + + shared_ptr data_; + shared_ptr data_2_; + shared_ptr int_data_; + shared_ptr int_data_2_; +}; + + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(RandomNumberGeneratorTest, Dtypes); + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + void* gaussian_data = this->data_->mutable_cpu_data(); + this->RngGaussianFill(mu, sigma, gaussian_data); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2) { + const TypeParam mu = -2; + const TypeParam sigma = 3; + void* gaussian_data = this->data_->mutable_cpu_data(); + this->RngGaussianFill(mu, sigma, gaussian_data); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform) { + const TypeParam lower = 0; + const TypeParam upper = 1; + void* uniform_data = this->data_->mutable_cpu_data(); + this->RngUniformFill(lower, upper, uniform_data); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2) { + const TypeParam lower = -7.3; + const TypeParam upper = -2.3; + void* uniform_data = 
this->data_->mutable_cpu_data(); + this->RngUniformFill(lower, upper, uniform_data); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli) { + const TypeParam p = 0.3; + void* bernoulli_data = this->int_data_->mutable_cpu_data(); + this->RngBernoulliFill(p, bernoulli_data); + this->RngBernoulliChecks(p, bernoulli_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulli2) { + const TypeParam p = 0.9; + void* bernoulli_data = this->int_data_->mutable_cpu_data(); + this->RngBernoulliFill(p, bernoulli_data); + this->RngBernoulliChecks(p, bernoulli_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussian) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + + // Sample from 0 mean Gaussian. + TypeParam* gaussian_data_1 = + static_cast(this->data_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data_1); + + // Sample from 0 mean Gaussian again. + TypeParam* gaussian_data_2 = + static_cast(this->data_2_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data_2); + + // Multiply Gaussians. + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data_1[i] *= gaussian_data_2[i]; + } + + // Check that result has mean 0. + TypeParam mu_product = pow(mu, 2); + TypeParam sigma_product = sqrt(pow(sigma, 2) / 2); + this->RngGaussianChecks(mu_product, sigma_product, gaussian_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniform) { + // Sample from Uniform on [-2, 2]. + const TypeParam lower_1 = -2; + const TypeParam upper_1 = -lower_1; + TypeParam* uniform_data_1 = + static_cast(this->data_->mutable_cpu_data()); + this->RngUniformFill(lower_1, upper_1, uniform_data_1); + + // Sample from Uniform on [-3, 3]. + const TypeParam lower_2 = -3; + const TypeParam upper_2 = -lower_2; + TypeParam* uniform_data_2 = + static_cast(this->data_2_->mutable_cpu_data()); + this->RngUniformFill(lower_2, upper_2, uniform_data_2); + + // Multiply Uniforms. + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data_1[i] *= uniform_data_2[i]; + } + + // Check that result does not violate checked properties of Uniform on [-6, 6] + // (though it is not actually uniformly distributed). + const TypeParam lower_prod = lower_1 * upper_2; + const TypeParam upper_prod = -lower_prod; + this->RngUniformChecks(lower_prod, upper_prod, uniform_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesBernoulli) { + // Sample from 0 mean Gaussian. + const TypeParam mu = 0; + const TypeParam sigma = 1; + TypeParam* gaussian_data = + static_cast(this->data_->mutable_cpu_data()); + this->RngGaussianFill(mu, sigma, gaussian_data); + + // Sample from Bernoulli with p = 0.3. + const TypeParam bernoulli_p = 0.3; + int* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(bernoulli_p, bernoulli_data); + + // Multiply Gaussian by Bernoulli. + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data[i] *= bernoulli_data[i]; + } + + // Check that result does not violate checked properties of sparsified + // Gaussian (though it is not actually a Gaussian). + this->RngGaussianChecks(mu, sigma, gaussian_data, 1 - bernoulli_p); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesBernoulli) { + // Sample from Uniform on [-1, 1]. 
+ const TypeParam lower = -1; + const TypeParam upper = 1; + TypeParam* uniform_data = + static_cast(this->data_->mutable_cpu_data()); + this->RngUniformFill(lower, upper, uniform_data); + + // Sample from Bernoulli with p = 0.3. + const TypeParam bernoulli_p = 0.3; + int* bernoulli_data = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(bernoulli_p, bernoulli_data); + + // Multiply Uniform by Bernoulli. + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data[i] *= bernoulli_data[i]; + } + + // Check that result does not violate checked properties of sparsified + // Uniform on [-1, 1] (though it is not actually uniformly distributed). + this->RngUniformChecks(lower, upper, uniform_data, 1 - bernoulli_p); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngBernoulliTimesBernoulli) { + // Sample from Bernoulli with p = 0.5. + const TypeParam p_a = 0.5; + int* bernoulli_data_a = + static_cast(this->int_data_->mutable_cpu_data()); + this->RngBernoulliFill(p_a, bernoulli_data_a); + + // Sample from Bernoulli with p = 0.3. + const TypeParam p_b = 0.3; + int* bernoulli_data_b = + static_cast(this->int_data_2_->mutable_cpu_data()); + this->RngBernoulliFill(p_b, bernoulli_data_b); + + // Multiply Bernoullis. + for (int i = 0; i < this->sample_size_; ++i) { + bernoulli_data_a[i] *= bernoulli_data_b[i]; + } + int num_ones = 0; + for (int i = 0; i < this->sample_size_; ++i) { + if (bernoulli_data_a[i] != TypeParam(0)) { + EXPECT_EQ(TypeParam(1), bernoulli_data_a[i]); + ++num_ones; + } + } + + // Check that resulting product has roughly p_a * p_b ones. + const TypeParam sample_p = this->sample_mean(bernoulli_data_a); + const TypeParam true_mean = p_a * p_b; + const TypeParam true_std = sqrt(true_mean * (1 - true_mean)); + const TypeParam bound = this->mean_bound(true_std); + EXPECT_NEAR(true_mean, sample_p, bound); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianGPU) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + void* gaussian_gpu_data = this->data_->mutable_gpu_data(); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data); + const void* gaussian_data = this->data_->cpu_data(); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussian2GPU) { + const TypeParam mu = -2; + const TypeParam sigma = 3; + void* gaussian_gpu_data = this->data_->mutable_gpu_data(); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data); + const void* gaussian_data = this->data_->cpu_data(); + this->RngGaussianChecks(mu, sigma, gaussian_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformGPU) { + const TypeParam lower = 0; + const TypeParam upper = 1; + void* uniform_gpu_data = this->data_->mutable_gpu_data(); + this->RngUniformFillGPU(lower, upper, uniform_gpu_data); + const void* uniform_data = this->data_->cpu_data(); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniform2GPU) { + const TypeParam lower = -7.3; + const TypeParam upper = -2.3; + void* uniform_gpu_data = this->data_->mutable_gpu_data(); + this->RngUniformFillGPU(lower, upper, uniform_gpu_data); + const void* uniform_data = this->data_->cpu_data(); + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformIntGPU) { + unsigned int* uniform_uint_gpu_data = + static_cast(this->int_data_->mutable_gpu_data()); + this->RngUniformIntFillGPU(uniform_uint_gpu_data); + const unsigned int* uniform_uint_data = + 
static_cast(this->int_data_->cpu_data()); + TypeParam* uniform_data = + static_cast(this->data_->mutable_cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data[i] = static_cast(uniform_uint_data[i]); + } + const TypeParam lower = 0; + const TypeParam upper = UINT_MAX; + this->RngUniformChecks(lower, upper, uniform_data); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngGaussianTimesGaussianGPU) { + const TypeParam mu = 0; + const TypeParam sigma = 1; + + // Sample from 0 mean Gaussian. + TypeParam* gaussian_gpu_data_1 = + static_cast(this->data_->mutable_gpu_data()); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data_1); + + // Sample from 0 mean Gaussian again. + TypeParam* gaussian_gpu_data_2 = + static_cast(this->data_2_->mutable_gpu_data()); + this->RngGaussianFillGPU(mu, sigma, gaussian_gpu_data_2); + + // Multiply Gaussians. + TypeParam* gaussian_data_1 = + static_cast(this->data_->mutable_cpu_data()); + const TypeParam* gaussian_data_2 = + static_cast(this->data_2_->cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + gaussian_data_1[i] *= gaussian_data_2[i]; + } + + // Check that result does not violate checked properties of Gaussian + // (though it is not actually a Gaussian). + TypeParam mu_product = pow(mu, 2); + TypeParam sigma_product = sqrt(pow(sigma, 2) / 2); + this->RngGaussianChecks(mu_product, sigma_product, gaussian_data_1); +} + + +TYPED_TEST(RandomNumberGeneratorTest, TestRngUniformTimesUniformGPU) { + // Sample from Uniform on [-2, 2]. + const TypeParam lower_1 = -2; + const TypeParam upper_1 = -lower_1; + TypeParam* uniform_gpu_data_1 = + static_cast(this->data_->mutable_gpu_data()); + this->RngUniformFillGPU(lower_1, upper_1, uniform_gpu_data_1); + + // Sample from Uniform on [-3, 3]. + const TypeParam lower_2 = -3; + const TypeParam upper_2 = -lower_2; + TypeParam* uniform_gpu_data_2 = + static_cast(this->data_2_->mutable_gpu_data()); + this->RngUniformFillGPU(lower_2, upper_2, uniform_gpu_data_2); + + // Multiply Uniforms. + TypeParam* uniform_data_1 = + static_cast(this->data_->mutable_cpu_data()); + const TypeParam* uniform_data_2 = + static_cast(this->data_2_->cpu_data()); + for (int i = 0; i < this->sample_size_; ++i) { + uniform_data_1[i] *= uniform_data_2[i]; + } + + // Check that result does not violate properties of Uniform on [-7, -3]. + const TypeParam lower_prod = lower_1 * upper_2; + const TypeParam upper_prod = -lower_prod; + this->RngUniformChecks(lower_prod, upper_prod, uniform_data_1); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp new file mode 100644 index 000000000..d8018be0c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -0,0 +1,134 @@ +// Copyright 2014 BVLC and contributors. 
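// This file checks SigmoidCrossEntropyLossLayer against a brute-force
// reference: for predictions x and targets t in [0, 1],
//   loss = -(1/num) * sum_i [ t_i * log(sigmoid(x_i))
//                             + (1 - t_i) * log(1 - sigmoid(x_i)) ],
// where the (target == 0) / (target == 1) offsets in the reference guard
// against 0 * log(0).  An equivalent, numerically stable per-element form
// (shown only as a sketch; the layer is not required to use it) is
//   max(x, 0) - x * t + log(1 + exp(-|x|)).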
+ +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class SigmoidCrossEntropyLossLayerTest : public ::testing::Test { + protected: + SigmoidCrossEntropyLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_targets_(new Blob(10, 5, 1, 1)) { + // Fill the data vector + FillerParameter data_filler_param; + data_filler_param.set_std(1); + GaussianFiller data_filler(data_filler_param); + data_filler.Fill(blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + // Fill the targets vector + FillerParameter targets_filler_param; + targets_filler_param.set_min(0); + targets_filler_param.set_max(1); + UniformFiller targets_filler(targets_filler_param); + targets_filler.Fill(blob_bottom_targets_); + blob_bottom_vec_.push_back(blob_bottom_targets_); + } + virtual ~SigmoidCrossEntropyLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_targets_; + } + + Dtype SigmoidCrossEntropyLossReference(const int count, const int num, + const Dtype* input, + const Dtype* target) { + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + const Dtype prediction = 1 / (1 + exp(-input[i])); + EXPECT_LE(prediction, 1); + EXPECT_GE(prediction, 0); + EXPECT_LE(target[i], 1); + EXPECT_GE(target[i], 0); + loss -= target[i] * log(prediction + (target[i] == Dtype(0))); + loss -= (1 - target[i]) * log(1 - prediction + (target[i] == Dtype(1))); + } + return loss / num; + } + + void TestForward() { + LayerParameter layer_param; + FillerParameter data_filler_param; + data_filler_param.set_std(1); + GaussianFiller data_filler(data_filler_param); + FillerParameter targets_filler_param; + targets_filler_param.set_min(0.0); + targets_filler_param.set_max(1.0); + UniformFiller targets_filler(targets_filler_param); + Dtype eps = 2e-2; + int num_inf = 0; + for (int i = 0; i < 100; ++i) { + // Fill the data vector + data_filler.Fill(this->blob_bottom_data_); + // Fill the targets vector + targets_filler.Fill(this->blob_bottom_targets_); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + Dtype layer_loss = + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const int count = this->blob_bottom_data_->count(); + const int num = this->blob_bottom_data_->num(); + const Dtype* blob_bottom_data = this->blob_bottom_data_->cpu_data(); + const Dtype* blob_bottom_targets = + this->blob_bottom_targets_->cpu_data(); + Dtype reference_loss = this->SigmoidCrossEntropyLossReference( + count, num, blob_bottom_data, blob_bottom_targets); + EXPECT_NEAR(reference_loss, layer_loss, eps) << "debug: trial #" << i; + } + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_targets_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(SigmoidCrossEntropyLossLayerTest, Dtypes); + + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossCPU) { + Caffe::set_mode(Caffe::CPU); + this->TestForward(); +} + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestSigmoidCrossEntropyLossGPU) { + Caffe::set_mode(Caffe::GPU); + this->TestForward(); +} + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientCPU) { + LayerParameter layer_param; + 
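  // The numeric check set up below perturbs each element of bottom blob 0
  // (the predictions, hence check_bottom = 0) by the 1e-2 stepsize, compares
  // the finite-difference estimate with the analytic Backward pass within a
  // 1e-2 threshold, and fixes the random seed to 1701 for reproducibility.
  // With top_id = top_data_id = -1 and an empty top vector, the objective
  // being differentiated is the layer's own scalar loss.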
Caffe::set_mode(Caffe::CPU); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_layer.cpp new file mode 100644 index 000000000..3ba302d4c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_layer.cpp @@ -0,0 +1,85 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class SoftmaxLayerTest : public ::testing::Test { + protected: + SoftmaxLayerTest() + : blob_bottom_(new Blob(2, 10, 1, 1)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~SoftmaxLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(SoftmaxLayerTest, Dtypes); + +TYPED_TEST(SoftmaxLayerTest, TestForwardCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SoftmaxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Test sum + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + TypeParam sum = 0; + for (int j = 0; j < this->blob_top_->channels(); ++j) { + sum += this->blob_top_->data_at(i, j, 0, 0); + } + EXPECT_GE(sum, 0.999); + EXPECT_LE(sum, 1.001); + } + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + TypeParam scale = 0; + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + scale += exp(this->blob_bottom_->data_at(i, j, 0, 0)); + } + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + EXPECT_GE(this->blob_top_->data_at(i, j, 0, 0) + 1e-4, + exp(this->blob_bottom_->data_at(i, j, 0, 0)) / scale) + << "debug: " << i << " " << j; + EXPECT_LE(this->blob_top_->data_at(i, j, 0, 0) - 1e-4, + exp(this->blob_bottom_->data_at(i, j, 0, 0)) / scale) + << "debug: " << i << " " << j; + } + } +} + +TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SoftmaxLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +} // namespace caffe diff --git 
a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_with_loss_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_with_loss_layer.cpp new file mode 100644 index 000000000..8b8be8e8b --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -0,0 +1,73 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class SoftmaxWithLossLayerTest : public ::testing::Test { + protected: + SoftmaxWithLossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 1, 1, 1)) { + // fill the values + FillerParameter filler_param; + filler_param.set_std(10); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_label_->count(); ++i) { + blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + } + blob_bottom_vec_.push_back(blob_bottom_label_); + } + virtual ~SoftmaxWithLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + } + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(SoftmaxWithLossLayerTest, Dtypes); + + +TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SoftmaxWithLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SoftmaxWithLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientSingle(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_), 0, -1, -1); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_split_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_split_layer.cpp new file mode 100644 index 000000000..327bcf937 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_split_layer.cpp @@ -0,0 +1,951 @@ +// Copyright 2014 BVLC and contributors. 
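// SplitLayer copies one bottom blob into N top blobs so a single output can
// feed several consumers, and InsertSplits() rewrites a NetParameter to add
// such layers wherever a top is consumed by more than one bottom.  Roughly,
// a blob 'data' read twice becomes
//   data -> data_data_0_split -> { data, data_data_0_split_1 }
// which is exactly the naming the expected protos further down spell out.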
+ +#include +#include +#include + +#include "cuda_runtime.h" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" +#include "caffe/util/insert_splits.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class SplitLayerTest : public ::testing::Test { + protected: + SplitLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_a_(new Blob()), + blob_top_b_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + } + virtual ~SplitLayerTest() { + delete blob_bottom_; + delete blob_top_a_; + delete blob_top_b_; + } + Blob* const blob_bottom_; + Blob* const blob_top_a_; + Blob* const blob_top_b_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(SplitLayerTest, Dtypes); + +TYPED_TEST(SplitLayerTest, TestSetup) { + LayerParameter layer_param; + SplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_a_->num(), 2); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 6); + EXPECT_EQ(this->blob_top_a_->width(), 5); + EXPECT_EQ(this->blob_top_b_->num(), 2); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 6); + EXPECT_EQ(this->blob_top_b_->width(), 5); +} + +TYPED_TEST(SplitLayerTest, TestCPU) { + LayerParameter layer_param; + SplitLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + TypeParam bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(SplitLayerTest, TestGPU) { + LayerParameter layer_param; + SplitLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + TypeParam bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(SplitLayerTest, TestCPUInPlace) { + LayerParameter layer_param; + SplitLayer layer(layer_param); + Caffe::set_mode(Caffe::CPU); + this->blob_top_vec_[0] = this->blob_bottom_vec_[0]; + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + TypeParam bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(SplitLayerTest, TestGPUInPlace) { + LayerParameter layer_param; + SplitLayer layer(layer_param); + Caffe::set_mode(Caffe::GPU); + this->blob_top_vec_[0] = this->blob_bottom_vec_[0]; + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + 
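  // In-place variant: blob_top_vec_[0] now aliases the bottom blob, so the
  // loop below only compares the second top against the bottom; the first
  // "copy" is the bottom itself.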
layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + TypeParam bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(SplitLayerTest, TestCPUGradient) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(SplitLayerTest, TestGPUGradient) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(SplitLayerTest, TestCPUGradientInPlace) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + SplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + this->blob_top_vec_[0] = this->blob_bottom_vec_[0]; + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + SplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + this->blob_top_vec_[0] = this->blob_bottom_vec_[0]; + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + +class SplitLayerInsertionTest : public ::testing::Test { + protected: + void RunInsertionTest( + const string& input_param_string, const string& output_param_string) { + // Test that InsertSplits called on the proto specified by + // input_param_string results in the proto specified by + // output_param_string. + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + InsertSplits(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + // Also test idempotence. 
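    // Running InsertSplits a second time on its own output must change
    // nothing: every multiply-consumed blob has already been split, so the
    // re-run is expected to reproduce the proto byte-for-byte.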
+ NetParameter double_split_insert_param; + InsertSplits(actual_output_param, &double_split_insert_param); + EXPECT_EQ(actual_output_param.DebugString(), + double_split_insert_param.DebugString()); + } +}; + +TEST_F(SplitLayerInsertionTest, TestNoInsertion1) { + const string& input_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerprod' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layers: { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertion2) { + const string& input_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'data_split' " + " type: SPLIT " + " bottom: 'data' " + " top: 'data_split_0' " + " top: 'data_split_1' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data_split_0' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'data_split_1' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'loss' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertionImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'relu1' " + " type: RELU " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " name: 'pool1' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " name: 'norm1' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " name: 'conv2' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layers { " + " name: 'relu2' " + " type: RELU " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " name: 'pool2' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " name: 'norm2' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " name: 'conv3' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layers { " + " name: 'relu3' " + " type: RELU " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " name: 'conv4' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layers { " + " name: 'relu4' " + " type: RELU " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " name: 'conv5' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layers { " + " name: 'relu5' " + " type: RELU " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " name: 'pool5' " + " type: POOLING " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " name: 'fc6' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " name: 'relu6' " + " type: RELU " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'drop6' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'fc7' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " name: 'relu7' " + " type: RELU " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'drop7' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'fc8' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestNoInsertionWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerprod' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layers: { " + " name: 'relu' " + " type: RELU " + " bottom: 'innerprod' " + " top: 'innerprod' " + "} " + "layers: { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInsertion) { + const string& input_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'innerprod3' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2' " + " bottom: 'innerprod3' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'data_data_0_split' " + " type: SPLIT " + " bottom: 'data' " + " top: 'data' " + " top: 'data_data_0_split_1' " + " top: 'data_data_0_split_2' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'innerprod2_innerprod2_0_split' " + " type: SPLIT " + " bottom: 'innerprod2' " + " top: 'innerprod2' " + " top: 'innerprod2_innerprod2_0_split_1' " + "} " + "layers: { " + " name: 'innerprod3' " + " type: INNER_PRODUCT " + " bottom: 'data_data_0_split_2' " + " top: 'innerprod3' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2_innerprod2_0_split_1' " + " bottom: 'innerprod3' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInsertionTwoTop) { + const 
string& input_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'label' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'innerprod3' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layers: { " + " name: 'innerprod4' " + " type: INNER_PRODUCT " + " bottom: 'label' " + " top: 'innerprod4' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'data_data_0_split' " + " type: SPLIT " + " bottom: 'data' " + " top: 'data' " + " top: 'data_data_0_split_1' " + "} " + "layers: { " + " name: 'label_data_1_split' " + " type: SPLIT " + " bottom: 'label' " + " top: 'label' " + " top: 'label_data_1_split_1' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'label' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'innerprod3' " + " type: INNER_PRODUCT " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod3' " + "} " + "layers: { " + " name: 'innerprod4' " + " type: INNER_PRODUCT " + " bottom: 'label_data_1_split_1' " + " top: 'innerprod4' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestInputInsertion) { + const string& input_proto = + "name: 'TestNetwork' " + "input: 'data' " + "input_dim: 10 " + "input_dim: 3 " + "input_dim: 227 " + "input_dim: 227 " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'loss' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "input: 'data' " + "input_dim: 10 " + "input_dim: 3 " + "input_dim: 227 " + "input_dim: 227 " + "layers: { " + " name: 'data_input_0_split' " + " type: SPLIT " + " bottom: 'data' " + " top: 'data' " + " top: 'data_input_0_split_1' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'data_input_0_split_1' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'loss' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(SplitLayerInsertionTest, TestWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " 
+ "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'relu1' " + " type: RELU " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'innerprod1' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1' " + " bottom: 'label' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2' " + " bottom: 'data' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layers: { " + " name: 'data' " + " type: DATA " + " top: 'data' " + " top: 'label' " + "} " + "layers: { " + " name: 'data_data_0_split' " + " type: SPLIT " + " bottom: 'data' " + " top: 'data' " + " top: 'data_data_0_split_1' " + "} " + "layers: { " + " name: 'innerprod1' " + " type: INNER_PRODUCT " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'relu1' " + " type: RELU " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layers: { " + " name: 'innerprod1_relu1_0_split' " + " type: SPLIT " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + " top: 'innerprod1_relu1_0_split_1' " + "} " + "layers: { " + " name: 'innerprod2' " + " type: INNER_PRODUCT " + " bottom: 'innerprod1' " + " top: 'innerprod2' " + "} " + "layers: { " + " name: 'loss1' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod1_relu1_0_split_1' " + " bottom: 'label' " + "} " + "layers: { " + " name: 'loss2' " + " type: EUCLIDEAN_LOSS " + " bottom: 'innerprod2' " + " bottom: 'data_data_0_split_1' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_stochastic_pooling.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_stochastic_pooling.cpp new file mode 100644 index 000000000..0ad8123f8 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_stochastic_pooling.cpp @@ -0,0 +1,168 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::min; + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class StochasticPoolingLayerTest : public ::testing::Test { + protected: + StochasticPoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + filler_param.set_min(0.1); + filler_param.set_max(1.); + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~StochasticPoolingLayerTest() { + delete blob_bottom_; delete blob_top_; + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(StochasticPoolingLayerTest, Dtypes); + +TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TRAIN); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + + // Check if the output is correct - it should do random sampling + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + TypeParam total = 0; + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; + total += pooled; + int hstart = ph * 2; + int hend = min(hstart + 3, this->blob_bottom_->height()); + int wstart = pw * 2; + int wend = min(wstart + 3, this->blob_bottom_->width()); + bool has_equal = false; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + has_equal |= (pooled == bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); + } + } + EXPECT_TRUE(has_equal); + } + } + } + } + // When we are doing stochastic pooling, the average we get should be higher + // than the simple data average since we are weighting more on higher-valued + // ones. 
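  // Concretely: the inputs were filled from Uniform(0.1, 1.0), whose plain
  // mean is (0.1 + 1.0) / 2 = 0.55.  For a window with values v_i, sampling
  // in proportion to value gives E[output] = sum(v_i^2) / sum(v_i), which is
  // never below the window mean, so the overall average should clear 0.55.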
+ EXPECT_GE(total / this->blob_top_->count(), 0.55); +} + +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TEST); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + + // Check if the output is correct - it should do random sampling + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int ph = 0; ph < this->blob_top_->height(); ++ph) { + for (int pw = 0; pw < this->blob_top_->width(); ++pw) { + TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)]; + int hstart = ph * 2; + int hend = min(hstart + 3, this->blob_bottom_->height()); + int wstart = pw * 2; + int wend = min(wstart + 3, this->blob_bottom_->width()); + bool smaller_than_max = false; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); + } + } + EXPECT_TRUE(smaller_than_max); + } + } + } + } +} + +TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TRAIN); + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC); + PoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + // it is too expensive to call curand multiple times, so we don't do an + // exhaustive gradient check. + checker.CheckGradient(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_syncedmem.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_syncedmem.cpp new file mode 100644 index 000000000..cd7475898 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_syncedmem.cpp @@ -0,0 +1,90 @@ +// Copyright 2014 BVLC and contributors. 
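// SyncedMemory keeps one CPU and one GPU allocation behind a small state
// machine (UNINITIALIZED -> HEAD_AT_CPU / HEAD_AT_GPU -> SYNCED) and copies
// between host and device only when the stale side is read.  A minimal
// sketch of the pattern these tests exercise:
//   SyncedMemory mem(10);
//   memset(mem.mutable_cpu_data(), 1, mem.size());  // head -> HEAD_AT_CPU
//   const void* gpu = mem.gpu_data();                // one H2D copy, SYNCED
//   mem.mutable_gpu_data();                          // head -> HEAD_AT_GPU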
+ +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/common.hpp" +#include "caffe/syncedmem.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +class SyncedMemoryTest : public ::testing::Test {}; + +TEST_F(SyncedMemoryTest, TestInitialization) { + SyncedMemory mem(10); + EXPECT_EQ(mem.head(), SyncedMemory::UNINITIALIZED); + EXPECT_EQ(mem.size(), 10); + SyncedMemory* p_mem = new SyncedMemory(10 * sizeof(float)); + EXPECT_EQ(p_mem->size(), 10 * sizeof(float)); + delete p_mem; +} + +TEST_F(SyncedMemoryTest, TestAllocation) { + SyncedMemory mem(10); + EXPECT_TRUE(mem.cpu_data()); + EXPECT_TRUE(mem.gpu_data()); + EXPECT_TRUE(mem.mutable_cpu_data()); + EXPECT_TRUE(mem.mutable_gpu_data()); +} + +TEST_F(SyncedMemoryTest, TestCPUWrite) { + SyncedMemory mem(10); + void* cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + memset(cpu_data, 1, mem.size()); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 1); + } + const void* gpu_data = mem.gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + // check if values are the same + char* recovered_value = new char[10]; + cudaMemcpy(reinterpret_cast(recovered_value), gpu_data, 10, + cudaMemcpyDeviceToHost); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(recovered_value))[i], 1); + } + // do another round + cpu_data = mem.mutable_cpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); + memset(cpu_data, 2, mem.size()); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 2); + } + gpu_data = mem.gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + // check if values are the same + cudaMemcpy(reinterpret_cast(recovered_value), gpu_data, 10, + cudaMemcpyDeviceToHost); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(recovered_value))[i], 2); + } + delete[] recovered_value; +} + +TEST_F(SyncedMemoryTest, TestGPUWrite) { + SyncedMemory mem(10); + void* gpu_data = mem.mutable_gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); + CUDA_CHECK(cudaMemset(gpu_data, 1, mem.size())); + const void* cpu_data = mem.cpu_data(); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 1); + } + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); + + gpu_data = mem.mutable_gpu_data(); + EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_GPU); + CUDA_CHECK(cudaMemset(gpu_data, 2, mem.size())); + cpu_data = mem.cpu_data(); + for (int i = 0; i < mem.size(); ++i) { + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 2); + } + EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_tanh_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_tanh_layer.cpp new file mode 100644 index 000000000..9c9f8a74a --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_tanh_layer.cpp @@ -0,0 +1,109 @@ +// Copyright 2014 BVLC and contributors. 
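// The forward tests below compare TanHLayer's output element-wise against
// the closed form tanh(x) = (e^{2x} - 1) / (e^{2x} + 1), which is the same
// as (e^x - e^{-x}) / (e^x + e^{-x}), allowing a 1e-4 slack on either side.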
+// Adapted from other test files + +#include +#include +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class TanHLayerTest : public ::testing::Test { + protected: + TanHLayerTest() + : blob_bottom_(new Blob(2, 10, 1, 1)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~TanHLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(TanHLayerTest, Dtypes); + +TYPED_TEST(TanHLayerTest, TestForwardCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + TanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + } + } + } + } +} + +TYPED_TEST(TanHLayerTest, TestGradientCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + TanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +TYPED_TEST(TanHLayerTest, TestForwardGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + TanHLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Test exact values + for (int i = 0; i < this->blob_bottom_->num(); ++i) { + for (int j = 0; j < this->blob_bottom_->channels(); ++j) { + for (int k = 0; k < this->blob_bottom_->height(); ++k) { + for (int l = 0; l < this->blob_bottom_->width(); ++l) { + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + } + } + } + } +} + +TYPED_TEST(TanHLayerTest, TestGradientGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + TanHLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), + &(this->blob_top_vec_)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_threshold_layer.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_threshold_layer.cpp new file 
mode 100644 index 000000000..8303e4420 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_threshold_layer.cpp @@ -0,0 +1,150 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "cuda_runtime.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class ThresholdLayerTest : public ::testing::Test { + protected: + ThresholdLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~ThresholdLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types Dtypes; +TYPED_TEST_CASE(ThresholdLayerTest, Dtypes); + + +TYPED_TEST(ThresholdLayerTest, TestSetup) { + LayerParameter layer_param; + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_->width()); +} + +TYPED_TEST(ThresholdLayerTest, TestCPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam threshold_ = layer_param.threshold_param().threshold(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +TYPED_TEST(ThresholdLayerTest, TestCPU2) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); + ThresholdParameter* threshold_param = + layer_param.mutable_threshold_param(); + threshold_param->set_threshold(0.5); + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam threshold_ = layer_param.threshold_param().threshold(); + EXPECT_FLOAT_EQ(threshold_, 0.5); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +TYPED_TEST(ThresholdLayerTest, TestGPU) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, 
&(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam threshold_ = layer_param.threshold_param().threshold(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +TYPED_TEST(ThresholdLayerTest, TestGPU2) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + ThresholdParameter* threshold_param = + layer_param.mutable_threshold_param(); + threshold_param->set_threshold(0.5); + ThresholdLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + // Now, check values + const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); + const TypeParam* top_data = this->blob_top_->cpu_data(); + const TypeParam threshold_ = layer_param.threshold_param().threshold(); + EXPECT_FLOAT_EQ(threshold_, 0.5); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_LE(top_data[i], 1.); + if (top_data[i] == 0) { + EXPECT_LE(bottom_data[i], threshold_); + } + if (top_data[i] == 1) { + EXPECT_GT(bottom_data[i], threshold_); + } + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_upgrade_proto.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_upgrade_proto.cpp new file mode 100644 index 000000000..9203f5583 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_upgrade_proto.cpp @@ -0,0 +1,2437 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include "cuda_runtime.h" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/util/upgrade_proto.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::string; + +namespace caffe { + +class PaddingLayerUpgradeTest : public ::testing::Test { + protected: + void RunPaddingUpgradeTest( + const string& input_param_string, const string& output_param_string) { + // Test that UpgradeV0PaddingLayers called on the proto specified by + // input_param_string results in the proto specified by + // output_param_string. + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + UpgradeV0PaddingLayers(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + // Also test idempotence. 
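    // UpgradeV0PaddingLayers folds each standalone 'padding' layer into the
    // convolution layer that consumes it (as a 'pad' field), so running the
    // upgrade again on an already-upgraded net must leave the proto
    // unchanged, which is what the second pass below verifies.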
+ NetParameter double_pad_upgrade_param; + UpgradeV0PaddingLayers(actual_output_param, &double_pad_upgrade_param); + EXPECT_EQ(actual_output_param.DebugString(), + double_pad_upgrade_param.DebugString()); + } +}; + +TEST_F(PaddingLayerUpgradeTest, TestSimple) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(PaddingLayerUpgradeTest, TestTwoTops) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. 
" + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(PaddingLayerUpgradeTest, TestImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'pad2' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'norm1' " + " top: 'pad2' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'pad3' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'norm2' " + " top: 'pad3' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'pad3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'pad4' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv3' " + " top: 'pad4' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'pad5' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv4' " + " top: 'pad5' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunPaddingUpgradeTest(input_proto, expected_output_proto); +} + +class V0UpgradeTest : public ::testing::Test { + protected: + void RunV0UpgradeTest( + const string& input_param_string, const string& output_param_string) { + // Test that UpgradeV0Net called on the NetParameter proto specified by + // input_param_string results in the NetParameter proto specified by + // output_param_string. 
+ NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + UpgradeV0Net(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + } +}; + +TEST_F(V0UpgradeTest, TestSimple) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'pad1' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'data' " + " top: 'pad1' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'fc8' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv1' " + " top: 'fc8' " + "} " + "layers { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV0UpgradeTest(input_proto, expected_output_proto); +} + +// Test any layer or parameter upgrades not covered by other tests. 
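+// TestAllParams below feeds one V0 layer of every supported type through +// UpgradeV0Net and checks the V1 rewrite: each layer gains a typed parameter +// message (data_param, convolution_param, ...) and old field names are +// renamed (meanfile -> mean_file, batchsize -> batch_size, +// det_fg_threshold -> fg_threshold, and so on).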
+TEST_F(V0UpgradeTest, TestAllParams) { + const string& input_proto = + "name: 'CaffeNet' " + "input: 'input_data' " + "input_dim: 64 " + "input_dim: 3 " + "input_dim: 32 " + "input_dim: 32 " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'images' " + " type: 'images' " + " source: '/home/jiayq/Data/ILSVRC12/train-images' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " shuffle_images: true " + " new_height: 40 " + " new_width: 30 " + " } " + " top: 'images_data' " + " top: 'images_label' " + "} " + "layers { " + " layer { " + " name: 'window_data' " + " type: 'window_data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " det_fg_threshold: 0.25 " + " det_bg_threshold: 0.75 " + " det_fg_fraction: 0.5 " + " det_context_pad: 16 " + " det_crop_mode: 'square' " + " } " + " top: 'window_data' " + " top: 'window_label' " + "} " + "layers { " + " layer { " + " name: 'hdf5data' " + " type: 'hdf5_data' " + " source: '/my/hdf5/data' " + " batchsize: 256 " + " } " + " top: 'hdf5data' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " biasterm: false " + " pad: 4 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 3. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1ave' " + " type: 'pool' " + " pool: AVE " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1ave' " + "} " + "layers { " + " layer { " + " name: 'pool1stoch' " + " type: 'pool' " + " pool: STOCHASTIC " + " kernelsize: 4 " + " stride: 5 " + " } " + " bottom: 'conv1' " + " top: 'pool1stoch' " + "} " + "layers { " + " layer { " + " name: 'concat' " + " type: 'concat' " + " concat_dim: 2 " + " } " + " bottom: 'pool1ave' " + " bottom: 'pool1stoch' " + " top: 'pool1concat' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1concat' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " biasterm: false " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'norm1' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.2 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'infogain_loss' " + " source: '/my/infogain/matrix' " + " } " + " bottom: 'fc6' " + " bottom: 'label' " + "} " + "layers { " + " layer { " + " name: 'accuracy' " + " type: 'accuracy' " + " } " + "} " + "layers { " + " layer { " + " name: 'bnll' " + " type: 'bnll' " + " } " + "} " + "layers { " + " layer { " + " name: 'euclidean_loss' " + " type: 'euclidean_loss' " + " } " + "} " + "layers { " + " layer { " + " name: 'flatten' " + " type: 'flatten' " + " } " + "} " + "layers { " + " layer { " + " name: 'hdf5_output' " + " type: 'hdf5_output' " + " hdf5_output_param { " + " file_name: '/my/hdf5/output/file' " + " } " + " } " + "} " + "layers { " + " layer { " + " name: 'im2col' " + " type: 'im2col' " + " } " + "} " + "layers { " + " layer { " + " name: 'images' " + " type: 'images' " + " } " + "} " + "layers { " + " layer { " + " name: 'multinomial_logistic_loss' " + " type: 'multinomial_logistic_loss' " + " } " + "} " + "layers { " + " layer { " + " name: 'sigmoid' " + " type: 'sigmoid' " + " } " + "} " + "layers { " + " layer { " + " name: 'softmax' " + " type: 'softmax' " + " } " + "} " + "layers { " + " layer { " + " name: 'split' " + " type: 'split' " + " } " + "} " + "layers { " + " layer { " + " name: 'tanh' " + " type: 'tanh' " + " } " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "input: 'input_data' " + "input_dim: 64 " + "input_dim: 3 " + "input_dim: 32 " + "input_dim: 32 " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'images' " + " type: IMAGE_DATA " + " image_data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-images' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " scale: 0.25 " + " rand_skip: 73 " + " shuffle: true " + " new_height: 40 " + " new_width: 30 " + " } " + " top: 'images_data' " + " top: 'images_label' " + "} " + "layers { " + " name: 'window_data' " + " type: WINDOW_DATA " + " window_data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " fg_threshold: 0.25 " + " bg_threshold: 0.75 " + " fg_fraction: 0.5 " + " context_pad: 16 " + " crop_mode: 'square' " + " } " + " top: 'window_data' " + " top: 'window_label' " + "} " + "layers { " + " name: 'hdf5data' " + " type: HDF5_DATA " + " hdf5_data_param { " + " source: '/my/hdf5/data' " + " batch_size: 256 " + " } " + " top: 'hdf5data' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " bias_term: false " + " pad: 4 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 3. 
" + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'pool1ave' " + " type: POOLING " + " pooling_param { " + " pool: AVE " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1ave' " + "} " + "layers { " + " name: 'pool1stoch' " + " type: POOLING " + " pooling_param { " + " pool: STOCHASTIC " + " kernel_size: 4 " + " stride: 5 " + " } " + " bottom: 'conv1' " + " top: 'pool1stoch' " + "} " + "layers { " + " name: 'concat' " + " type: CONCAT " + " concat_param { " + " concat_dim: 2 " + " } " + " bottom: 'pool1ave' " + " bottom: 'pool1stoch' " + " top: 'pool1concat' " + "} " + "layers { " + " name: 'norm1' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1concat' " + " top: 'norm1' " + "} " + "layers { " + " name: 'fc6' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm1' " + " top: 'fc6' " + "} " + "layers { " + " name: 'relu6' " + " type: RELU " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'drop6' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.2 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'loss' " + " type: INFOGAIN_LOSS " + " infogain_loss_param { " + " source: '/my/infogain/matrix' " + " } " + " bottom: 'fc6' " + " bottom: 'label' " + "} " + "layers { " + " name: 'accuracy' " + " type: ACCURACY " + "} " + "layers { " + " name: 'bnll' " + " type: BNLL " + "} " + "layers { " + " name: 'euclidean_loss' " + " type: EUCLIDEAN_LOSS " + "} " + "layers { " + " name: 'flatten' " + " type: FLATTEN " + "} " + "layers { " + " name: 'hdf5_output' " + " type: HDF5_OUTPUT " + " hdf5_output_param { " + " file_name: '/my/hdf5/output/file' " + " } " + "} " + "layers { " + " name: 'im2col' " + " type: IM2COL " + "} " + "layers { " + " name: 'images' " + " type: IMAGE_DATA " + "} " + "layers { " + " name: 'multinomial_logistic_loss' " + " type: MULTINOMIAL_LOGISTIC_LOSS " + "} " + "layers { " + " name: 'sigmoid' " + " type: SIGMOID " + "} " + "layers { " + " name: 'softmax' " + " type: SOFTMAX " + "} " + "layers { " + " name: 'split' " + " type: SPLIT " + "} " + "layers { " + " name: 'tanh' " + " type: TANH " + "} "; + this->RunV0UpgradeTest(input_proto, expected_output_proto); +} + +TEST_F(V0UpgradeTest, TestImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layers { " + " layer { " + " name: 'data' " + " type: 'data' " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " meanfile: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batchsize: 256 " + " cropsize: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " layer { " + " name: 'conv1' " + " type: 'conv' " + " num_output: 96 " + " kernelsize: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'relu1' " + " type: 'relu' " + " } " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " layer { " + " name: 'pool1' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " layer { " + " name: 'norm1' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " layer { " + " name: 'pad2' " + " type: 'padding' " + " pad: 2 " + " } " + " bottom: 'norm1' " + " top: 'pad2' " + "} " + "layers { " + " layer { " + " name: 'conv2' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 5 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'relu2' " + " type: 'relu' " + " } " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " layer { " + " name: 'pool2' " + " type: 'pool' " + " pool: MAX " + " kernelsize: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " layer { " + " name: 'norm2' " + " type: 'lrn' " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " layer { " + " name: 'pad3' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'norm2' " + " top: 'pad3' " + "} " + "layers { " + " layer { " + " name: 'conv3' " + " type: 'conv' " + " num_output: 384 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'relu3' " + " type: 'relu' " + " } " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " layer { " + " name: 'pad4' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv3' " + " top: 'pad4' " + "} " + "layers { " + " layer { " + " name: 'conv4' " + " type: 'conv' " + " num_output: 384 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pad4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'relu4' " + " type: 'relu' " + " } " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " layer { " + " name: 'pad5' " + " type: 'padding' " + " pad: 1 " + " } " + " bottom: 'conv4' " + " top: 'pad5' " + "} " + "layers { " + " layer { " + " name: 'conv5' " + " type: 'conv' " + " num_output: 256 " + " group: 2 " + " kernelsize: 3 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " } " + " bottom: 'pad5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'relu5' " + " type: 'relu' " + " } " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " layer { " + " name: 'pool5' " + " type: 'pool' " + " kernelsize: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " layer { " + " name: 'fc6' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'relu6' " + " type: 'relu' " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'drop6' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " layer { " + " name: 'fc7' " + " type: 'innerproduct' " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'relu7' " + " type: 'relu' " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'drop7' " + " type: 'dropout' " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " layer { " + " name: 'fc8' " + " type: 'innerproduct' " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " layer { " + " name: 'loss' " + " type: 'softmax_loss' " + " } " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + const string& expected_output_proto = + "name: 'CaffeNet' " + "layers { " + " name: 'data' " + " type: DATA " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " batch_size: 256 " + " crop_size: 227 " + " mirror: true " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layers { " + " name: 'conv1' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'data' " + " top: 'conv1' " + "} " + "layers { " + " name: 'relu1' " + " type: RELU " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layers { " + " name: 'pool1' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layers { " + " name: 'norm1' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layers { " + " name: 'conv2' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layers { " + " name: 'relu2' " + " type: RELU " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layers { " + " name: 'pool2' " + " type: POOLING " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layers { " + " name: 'norm2' " + " type: LRN " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layers { " + " name: 'conv3' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layers { " + " name: 'relu3' " + " type: RELU " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layers { " + " name: 'conv4' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layers { " + " name: 'relu4' " + " type: RELU " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layers { " + " name: 'conv5' " + " type: CONVOLUTION " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layers { " + " name: 'relu5' " + " type: RELU " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layers { " + " name: 'pool5' " + " type: POOLING " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layers { " + " name: 'fc6' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. 
" + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layers { " + " name: 'relu6' " + " type: RELU " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'drop6' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layers { " + " name: 'fc7' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layers { " + " name: 'relu7' " + " type: RELU " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'drop7' " + " type: DROPOUT " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layers { " + " name: 'fc8' " + " type: INNER_PRODUCT " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " blobs_lr: 1. " + " blobs_lr: 2. " + " weight_decay: 1. " + " weight_decay: 0. " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layers { " + " name: 'loss' " + " type: SOFTMAX_LOSS " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunV0UpgradeTest(input_proto, expected_output_proto); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/test/test_util_blas.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_util_blas.cpp new file mode 100644 index 000000000..2e4c67959 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/test/test_util_blas.cpp @@ -0,0 +1,135 @@ +// Copyright 2014 BVLC and contributors. 
+ +#include <cstring> + +#include "cuda_runtime.h" + +#include "cublas_v2.h" + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +typedef ::testing::Types<float, double> Dtypes; + +template <typename Dtype> +class GemmTest : public ::testing::Test {}; + +TYPED_TEST_CASE(GemmTest, Dtypes); + +TYPED_TEST(GemmTest, TestGemm) { + Blob<TypeParam> A(1, 1, 2, 3); + Blob<TypeParam> B(1, 1, 3, 4); + Blob<TypeParam> C(1, 1, 2, 4); + TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; + TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; + TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; + TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128}; + memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam)); + memcpy(B.mutable_cpu_data(), data, 12 * sizeof(TypeParam)); + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; + caffe_cpu_gemm<TypeParam>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm<TypeParam>(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A + A.Reshape(1, 1, 3, 2); + memcpy(A.mutable_cpu_data(), A_reshape_data, 6 * sizeof(TypeParam)); + caffe_cpu_gemm<TypeParam>(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm<TypeParam>(CblasTrans, CblasNoTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed A and a transposed B too + B.Reshape(1, 1, 4, 3); + memcpy(B.mutable_cpu_data(), B_reshape_data, 12 * sizeof(TypeParam)); + caffe_cpu_gemm<TypeParam>(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm<TypeParam>(CblasTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + + // Test when we have a transposed B + A.Reshape(1, 1, 2, 3); + memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam)); + caffe_cpu_gemm<TypeParam>(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + caffe_gpu_gemm<TypeParam>(CblasNoTrans, CblasTrans, 2, 4, 3, 1., + A.gpu_data(), B.gpu_data(), 0., C.mutable_gpu_data()); + for (int i = 0; i < 8; ++i) { + EXPECT_EQ(C.cpu_data()[i], result[i]); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + + +TYPED_TEST(GemmTest, TestGemv) { + Blob<TypeParam> A(1, 1, 2, 3); + Blob<TypeParam> x(1, 1, 1, 3); + Blob<TypeParam> y(1, 1, 1, 2); + TypeParam data[6] = {1, 2, 3, 4, 5, 6}; + TypeParam result_2[2] = {14, 32}; + TypeParam result_3[3] = {9, 12, 15}; + memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam)); + memcpy(x.mutable_cpu_data(), data, 3 * sizeof(TypeParam)); + + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + caffe_cpu_gemv<TypeParam>(CblasNoTrans, 2, 3, 1., A.cpu_data(), + x.cpu_data(), 0., y.mutable_cpu_data()); + for (int i = 0; i < 2; ++i) {
+ EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } + caffe_gpu_gemv<TypeParam>(CblasNoTrans, 2, 3, 1., A.gpu_data(), + x.gpu_data(), 0., y.mutable_gpu_data()); + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(y.cpu_data()[i], result_2[i]); + } + + // Test transpose case + memcpy(y.mutable_cpu_data(), data, 2 * sizeof(TypeParam)); + caffe_cpu_gemv<TypeParam>(CblasTrans, 2, 3, 1., A.cpu_data(), + y.cpu_data(), 0., x.mutable_cpu_data()); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); + } + caffe_gpu_gemv<TypeParam>(CblasTrans, 2, 3, 1., A.gpu_data(), + y.gpu_data(), 0., x.mutable_gpu_data()); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(x.cpu_data()[i], result_3[i]); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/benchmark.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/benchmark.cpp new file mode 100644 index 000000000..0bd852182 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/benchmark.cpp @@ -0,0 +1,80 @@ +// Copyright 2014 BVLC and contributors. + +#include <boost/date_time/posix_time/posix_time.hpp> +#include <cuda_runtime.h> + +#include "caffe/common.hpp" +#include "caffe/util/benchmark.hpp" + +namespace caffe { + +Timer::Timer() + : initted_(false), + running_(false), + has_run_at_least_once_(false) { + Init(); +} + +Timer::~Timer() { + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaEventDestroy(start_gpu_)); + CUDA_CHECK(cudaEventDestroy(stop_gpu_)); + } +} + +void Timer::Start() { + if (!running()) { + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaEventRecord(start_gpu_, 0)); + } else { + start_cpu_ = boost::posix_time::microsec_clock::local_time(); + } + running_ = true; + has_run_at_least_once_ = true; + } +} + +void Timer::Stop() { + if (running()) { + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); + CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); + } else { + stop_cpu_ = boost::posix_time::microsec_clock::local_time(); + } + running_ = false; + } +} + +float Timer::MilliSeconds() { + if (!has_run_at_least_once()) { + LOG(WARNING) << "Timer has never been run before reading time."; + return 0; + } + if (running()) { + Stop(); + } + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, + stop_gpu_)); + } else { + elapsed_milliseconds_ = (stop_cpu_ - start_cpu_).total_milliseconds(); + } + return elapsed_milliseconds_; +} + +float Timer::Seconds() { + return MilliSeconds() / 1000.; +} + +void Timer::Init() { + if (!initted()) { + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaEventCreate(&start_gpu_)); + CUDA_CHECK(cudaEventCreate(&stop_gpu_)); + } + initted_ = true; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/format.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/format.cpp new file mode 100644 index 000000000..693e1d1fd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/format.cpp @@ -0,0 +1,54 @@ +// Copyright 2014 BVLC and contributors.
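+// Converts an OpenCV cv::Mat into a caffe::Datum, optionally resizing to the +// requested height/width and storing the pixels channel-major as raw bytes.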
+ +#include <opencv2/opencv.hpp> +#include <string> + +#include "caffe/common.hpp" +#include "caffe/util/format.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { +using std::string; + +bool OpenCVImageToDatum( + const cv::Mat& image, const int label, const int height, + const int width, const bool is_color, Datum* datum) { + cv::Mat cv_img; + CHECK(image.data) << "Image data must not be NULL"; + CHECK_GT(image.rows, 0) << "Image height must be positive"; + CHECK_GT(image.cols, 0) << "Image width must be positive"; + if (height > 0 && width > 0 && + (image.rows != height || image.cols != width)) { + cv::resize(image, cv_img, cv::Size(width, height)); + } else { + cv_img = image; + } + int num_channels = (is_color ? 3 : 1); + datum->set_channels(num_channels); + datum->set_height(cv_img.rows); + datum->set_width(cv_img.cols); + datum->set_label(label); + datum->clear_data(); + datum->clear_float_data(); + string* datum_string = datum->mutable_data(); + if (is_color) { + for (int c = 0; c < num_channels; ++c) { + for (int h = 0; h < cv_img.rows; ++h) { + for (int w = 0; w < cv_img.cols; ++w) { + datum_string->push_back( + static_cast<char>(cv_img.at<cv::Vec3b>(h, w)[c])); + } + } + } + } else { // Faster than repeatedly testing is_color for each pixel w/i loop + for (int h = 0; h < cv_img.rows; ++h) { + for (int w = 0; w < cv_img.cols; ++w) { + datum_string->push_back( + static_cast<char>(cv_img.at<uchar>(h, w))); + } + } + } + return true; +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cpp new file mode 100644 index 000000000..037410e29 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cpp @@ -0,0 +1,76 @@ +// Copyright 2014 BVLC and contributors. + +#include <cmath> +#include <cstdlib> +#include <cstring> + +#include "caffe/util/im2col.hpp" + +namespace caffe { + +template <typename Dtype> +void im2col_cpu(const Dtype* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col) { + int height_col = (height + 2 * pad - ksize) / stride + 1; + int width_col = (width + 2 * pad - ksize) / stride + 1; + int channels_col = channels * ksize * ksize; + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % ksize; + int h_offset = (c / ksize) % ksize; + int c_im = c / ksize / ksize; + for (int h = 0; h < height_col; ++h) { + for (int w = 0; w < width_col; ++w) { + int h_pad = h * stride - pad + h_offset; + int w_pad = w * stride - pad + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) + data_col[(c * height_col + h) * width_col + w] = + data_im[(c_im * height + h_pad) * width + w_pad]; + else + data_col[(c * height_col + h) * width_col + w] = 0; + } + } + } +} + +// Explicit instantiation +template void im2col_cpu<float>(const float* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, float* data_col); +template void im2col_cpu<double>(const double* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, double* data_col); + +template <typename Dtype> +void col2im_cpu(const Dtype* data_col, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_im) { + memset(data_im, 0, sizeof(Dtype) * height * width * channels); + int height_col = (height + 2 * pad - ksize) / stride + 1; + int width_col = (width + 2 * pad - ksize) / stride + 1; + int channels_col = channels * ksize * ksize; + for (int c = 0; c < channels_col; ++c) {
+ int w_offset = c % ksize; + int h_offset = (c / ksize) % ksize; + int c_im = c / ksize / ksize; + for (int h = 0; h < height_col; ++h) { + for (int w = 0; w < width_col; ++w) { + int h_pad = h * stride - pad + h_offset; + int w_pad = w * stride - pad + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) + data_im[(c_im * height + h_pad) * width + w_pad] += + data_col[(c * height_col + h) * width_col + w]; + } + } + } +} + +// Explicit instantiation +template void col2im_cpu<float>(const float* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, float* data_im); +template void col2im_cpu<double>(const double* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, double* data_im); + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cu b/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cu new file mode 100644 index 000000000..6aecb0e57 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/im2col.cu @@ -0,0 +1,132 @@ +// Copyright 2014 BVLC and contributors. + +#include <algorithm> +#include <cmath> +#include <cstdlib> +#include <cstring> + +#include "caffe/common.hpp" +#include "caffe/util/im2col.hpp" + +namespace caffe { + +template <typename Dtype> +__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, + const int height, const int width, const int ksize, const int pad, + const int stride, const int height_col, const int width_col, + Dtype* data_col) { + CUDA_KERNEL_LOOP(index, n) { + int w_out = index % width_col; + index /= width_col; + int h_out = index % height_col; + int channel_in = index / height_col; + int channel_out = channel_in * ksize * ksize; + int h_in = h_out * stride - pad; + int w_in = w_out * stride - pad; + data_col += (channel_out * height_col + h_out) * width_col + w_out; + data_im += (channel_in * height + h_in) * width + w_in; + for (int i = 0; i < ksize; ++i) { + for (int j = 0; j < ksize; ++j) { + int h = h_in + i; + int w = w_in + j; + *data_col = (h >= 0 && w >= 0 && h < height && w < width) ? + data_im[i * width + j] : 0; + data_col += height_col * width_col; + } + } + } +} + +template <typename Dtype> +void im2col_gpu(const Dtype* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col) { + // We are going to launch channels * height_col * width_col kernels, each + // kernel responsible for copying a single-channel grid.
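+ // The output grid has height_col = (height + 2 * pad - ksize) / stride + 1 + // (and likewise for width_col); e.g. a 227x227 input with ksize 11, pad 0, + // stride 4 yields a 55x55 grid.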
+ int height_col = (height + 2 * pad - ksize) / stride + 1; + int width_col = (width + 2 * pad - ksize) / stride + 1; + int num_kernels = channels * height_col * width_col; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( + num_kernels, data_im, height, width, ksize, pad, stride, height_col, + width_col, data_col); + CUDA_POST_KERNEL_CHECK; +} + + +// Explicit instantiation +template void im2col_gpu<float>(const float* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, float* data_col); +template void im2col_gpu<double>(const double* data_im, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, double* data_col); + +template <typename Dtype> +__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, + const int height, const int width, const int channels, const int ksize, + const int pad, const int stride, const int height_col, const int width_col, + Dtype* data_im) { + CUDA_KERNEL_LOOP(index, n) { + Dtype val = 0; + int w = index % width + pad; + int h = (index / width) % height + pad; + int c = index / (width * height); + // compute the start and end of the output + int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1; + int w_col_end = min(w / stride + 1, width_col); + int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1; + int h_col_end = min(h / stride + 1, height_col); + /* + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + // the col location: [c * width * height + h_out, w_out] + int c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride); + val += data_col[(c_col * height_col + h_col) * width_col + w_col]; + } + } + */ + // equivalent implementation + int offset = (c * ksize * ksize + h * ksize + w) * height_col * width_col; + int coeff_h_col = (1 - stride * ksize * height_col) * width_col; + int coeff_w_col = (1 - stride * height_col * width_col); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + data_im[index] = val; + } +} + +template <typename Dtype> +void col2im_gpu(const Dtype* data_col, const int channels, + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_im) { + // CUDA_CHECK(cudaMemset(data_im, 0, + // sizeof(Dtype) * height * width * channels)); + int height_col = (height + 2 * pad - ksize) / stride + 1; + int width_col = (width + 2 * pad - ksize) / stride + 1; + int num_kernels = channels * height * width; + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions.
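+ // With num_kernels = channels * height * width, each thread accumulates + // into exactly one data_im element, so no two threads write the same output.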
+ // NOLINT_NEXT_LINE(whitespace/operators) + col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( + num_kernels, data_col, height, width, channels, ksize, pad, stride, + height_col, width_col, data_im); + CUDA_POST_KERNEL_CHECK; +} + + +// Explicit instantiation +template void col2im_gpu<float>(const float* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, float* data_im); +template void col2im_gpu<double>(const double* data_col, const int channels, + const int height, const int width, const int psize, const int pad, + const int stride, double* data_im); + + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/insert_splits.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/insert_splits.cpp new file mode 100644 index 000000000..b9aeb37c7 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/insert_splits.cpp @@ -0,0 +1,128 @@ +// Copyright 2014 BVLC and contributors. + +#include <map> +#include <sstream> +#include <string> +#include <utility> + +#include "caffe/common.hpp" +#include "caffe/util/insert_splits.hpp" + +using std::map; +using std::ostringstream; +using std::pair; +using std::make_pair; + +namespace caffe { + +void InsertSplits(const NetParameter& param, NetParameter* param_split) { + // Initialize by copying from the input NetParameter. + param_split->CopyFrom(param); + param_split->clear_layers(); + map<string, pair<int, int> > blob_name_to_last_top_idx; + map<pair<int, int>, pair<int, int> > bottom_idx_to_source_top_idx; + map<pair<int, int>, int> top_idx_to_bottom_count; + map<pair<int, int>, int> top_idx_to_bottom_split_idx; + map<int, string> layer_idx_to_layer_name; + layer_idx_to_layer_name[-1] = "input"; + // Determine the number of times each blob is used as an input (bottom) blob. + for (int i = 0; i < param.input_size(); ++i) { + const string& blob_name = param.input(i); + blob_name_to_last_top_idx[blob_name] = make_pair(-1, i); + } + for (int i = 0; i < param.layers_size(); ++i) { + const LayerParameter& layer_param = param.layers(i); + layer_idx_to_layer_name[i] = layer_param.name(); + for (int j = 0; j < layer_param.bottom_size(); ++j) { + const string& blob_name = layer_param.bottom(j); + if (blob_name_to_last_top_idx.find(blob_name) == + blob_name_to_last_top_idx.end()) { + LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; + } + const pair<int, int>& bottom_idx = make_pair(i, j); + const pair<int, int>& top_idx = blob_name_to_last_top_idx[blob_name]; + bottom_idx_to_source_top_idx[bottom_idx] = top_idx; + ++top_idx_to_bottom_count[top_idx]; + } + for (int j = 0; j < layer_param.top_size(); ++j) { + const string& blob_name = layer_param.top(j); + blob_name_to_last_top_idx[blob_name] = make_pair(i, j); + } + } + // Create split layer for any input blobs used by other layers as bottom + // blobs more than once. + for (int i = 0; i < param.input_size(); ++i) { + const int split_count = top_idx_to_bottom_count[make_pair(-1, i)]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[-1]; + const string& blob_name = param.input(i); + LayerParameter* split_layer_param = param_split->add_layers(); + ConfigureSplitLayer(layer_name, blob_name, i, split_count, + split_layer_param); + } + } + for (int i = 0; i < param.layers_size(); ++i) { + LayerParameter* layer_param = param_split->add_layers(); + layer_param->CopyFrom(param.layers(i)); + // Replace any shared bottom blobs with split layer outputs.
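+ // Each blob that feeds more than one layer is routed through a SPLIT layer; + // the 0th split keeps the original blob name (see SplitBlobName below), so + // that consumer keeps reading the blob in place.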
+ for (int j = 0; j < layer_param->bottom_size(); ++j) { + const pair& top_idx = + bottom_idx_to_source_top_idx[make_pair(i, j)]; + const int split_count = top_idx_to_bottom_count[top_idx]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[top_idx.first]; + const string& blob_name = layer_param->bottom(j); + layer_param->set_bottom(j, SplitBlobName(layer_name, + blob_name, top_idx.second, top_idx_to_bottom_split_idx[top_idx]++)); + } + } + // Create split layer for any top blobs used by other layers as bottom + // blobs more than once. + for (int j = 0; j < layer_param->top_size(); ++j) { + const int split_count = top_idx_to_bottom_count[make_pair(i, j)]; + if (split_count > 1) { + const string& layer_name = layer_idx_to_layer_name[i]; + const string& blob_name = layer_param->top(j); + LayerParameter* split_layer_param = param_split->add_layers(); + ConfigureSplitLayer(layer_name, blob_name, j, split_count, + split_layer_param); + } + } + } +} + +void ConfigureSplitLayer(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_count, + LayerParameter* split_layer_param) { + split_layer_param->Clear(); + split_layer_param->add_bottom(blob_name); + split_layer_param->set_name(SplitLayerName(layer_name, blob_name, blob_idx)); + split_layer_param->set_type(LayerParameter_LayerType_SPLIT); + for (int k = 0; k < split_count; ++k) { + split_layer_param->add_top( + SplitBlobName(layer_name, blob_name, blob_idx, k)); + } +} + +string SplitLayerName(const string& layer_name, const string& blob_name, + const int blob_idx) { + ostringstream split_layer_name; + split_layer_name << blob_name << "_" << layer_name << "_" << blob_idx + << "_split"; + return split_layer_name.str(); +} + +string SplitBlobName(const string& layer_name, const string& blob_name, + const int blob_idx, const int split_idx) { + // 0th split top blob is given the same name as the bottom blob so that + // computation is done 'in-place', saving a bit of time and memory. + if (split_idx == 0) { + return blob_name; + } + ostringstream split_blob_name; + split_blob_name << blob_name << "_" << layer_name << "_" << blob_idx + << "_split_" << split_idx; + return split_blob_name.str(); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/io.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/io.cpp new file mode 100644 index 000000000..634cdacb5 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/io.cpp @@ -0,0 +1,155 @@ +// Copyright 2014 BVLC and contributors. 
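+// io.cpp: protobuf text/binary file (de)serialization, image loading into +// Datum via OpenCV, and HDF5 read/write helpers for 4-D blobs.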
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include // NOLINT(readability/streams) + +#include "caffe/common.hpp" +#include "caffe/util/format.hpp" +#include "caffe/util/io.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::fstream; +using std::ios; +using std::max; +using std::string; +using google::protobuf::io::FileInputStream; +using google::protobuf::io::FileOutputStream; +using google::protobuf::io::ZeroCopyInputStream; +using google::protobuf::io::CodedInputStream; +using google::protobuf::io::ZeroCopyOutputStream; +using google::protobuf::io::CodedOutputStream; +using google::protobuf::Message; + +namespace caffe { + +bool ReadProtoFromTextFile(const char* filename, Message* proto) { + int fd = open(filename, O_RDONLY); + CHECK_NE(fd, -1) << "File not found: " << filename; + FileInputStream* input = new FileInputStream(fd); + bool success = google::protobuf::TextFormat::Parse(input, proto); + delete input; + close(fd); + return success; +} + +void WriteProtoToTextFile(const Message& proto, const char* filename) { + int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); + FileOutputStream* output = new FileOutputStream(fd); + CHECK(google::protobuf::TextFormat::Print(proto, output)); + delete output; + close(fd); +} + +bool ReadProtoFromBinaryFile(const char* filename, Message* proto) { + int fd = open(filename, O_RDONLY); + CHECK_NE(fd, -1) << "File not found: " << filename; + ZeroCopyInputStream* raw_input = new FileInputStream(fd); + CodedInputStream* coded_input = new CodedInputStream(raw_input); + coded_input->SetTotalBytesLimit(1073741824, 536870912); + + bool success = proto->ParseFromCodedStream(coded_input); + + delete coded_input; + delete raw_input; + close(fd); + return success; +} + +void WriteProtoToBinaryFile(const Message& proto, const char* filename) { + fstream output(filename, ios::out | ios::trunc | ios::binary); + CHECK(proto.SerializeToOstream(&output)); +} + +bool ReadImageToDatum(const string& filename, const int label, + const int height, const int width, const bool is_color, Datum* datum) { + int cv_read_flag = (is_color ? CV_LOAD_IMAGE_COLOR : + CV_LOAD_IMAGE_GRAYSCALE); + cv::Mat cv_img = cv::imread(filename, cv_read_flag); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << filename; + return false; + } + return OpenCVImageToDatum(cv_img, label, height, width, is_color, datum); +} + +// Verifies format of data stored in HDF5 file and reshapes blob accordingly. +template +void hdf5_load_nd_dataset_helper( + hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, + Blob* blob) { + // Verify that the number of dimensions is in the accepted range. + herr_t status; + int ndims; + status = H5LTget_dataset_ndims(file_id, dataset_name_, &ndims); + CHECK_GE(ndims, min_dim); + CHECK_LE(ndims, max_dim); + + // Verify that the data format is what we expect: float or double. + std::vector dims(ndims); + H5T_class_t class_; + status = H5LTget_dataset_info( + file_id, dataset_name_, dims.data(), &class_, NULL); + CHECK_EQ(class_, H5T_FLOAT) << "Expected float or double data"; + + blob->Reshape( + dims[0], + (dims.size() > 1) ? dims[1] : 1, + (dims.size() > 2) ? dims[2] : 1, + (dims.size() > 3) ? 
dims[3] : 1); +} + +template <> +void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, + int min_dim, int max_dim, Blob* blob) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + herr_t status = H5LTread_dataset_float( + file_id, dataset_name_, blob->mutable_cpu_data()); +} + +template <> +void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, + int min_dim, int max_dim, Blob* blob) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + herr_t status = H5LTread_dataset_double( + file_id, dataset_name_, blob->mutable_cpu_data()); +} + +template <> +void hdf5_save_nd_dataset( + const hid_t file_id, const string dataset_name, const Blob& blob) { + hsize_t dims[HDF5_NUM_DIMS]; + dims[0] = blob.num(); + dims[1] = blob.channels(); + dims[2] = blob.height(); + dims[3] = blob.width(); + herr_t status = H5LTmake_dataset_float( + file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data()); + CHECK_GE(status, 0) << "Failed to make float dataset " << dataset_name; +} + +template <> +void hdf5_save_nd_dataset( + const hid_t file_id, const string dataset_name, const Blob& blob) { + hsize_t dims[HDF5_NUM_DIMS]; + dims[0] = blob.num(); + dims[1] = blob.channels(); + dims[2] = blob.height(); + dims[3] = blob.width(); + herr_t status = H5LTmake_dataset_double( + file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data()); + CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name; +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cpp new file mode 100644 index 000000000..67274ef73 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cpp @@ -0,0 +1,493 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template<> +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, + ldb, beta, C, N); +} + +template<> +void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B, + ldb, beta, C, N); +} + +template <> +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { + // Note that cublas follows fortran order. + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; + CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); +} + +template <> +void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, + const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { + // Note that cublas follows fortran order. + int lda = (TransA == CblasNoTrans) ? K : M; + int ldb = (TransB == CblasNoTrans) ? N : K; + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + cublasOperation_t cuTransB = + (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; + CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, + N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); +} + +template <> +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const float alpha, const float* A, const float* x, + const float beta, float* y) { + cblas_sgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); +} + +template <> +void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const double alpha, const double* A, const double* x, + const double beta, double* y) { + cblas_dgemv(CblasRowMajor, TransA, M, N, alpha, A, N, x, 1, beta, y, 1); +} + +template <> +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const float alpha, const float* A, const float* x, + const float beta, float* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, + A, N, x, 1, &beta, y, 1)); +} + +template <> +void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, + const int N, const double alpha, const double* A, const double* x, + const double beta, double* y) { + cublasOperation_t cuTransA = + (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; + CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, + A, N, x, 1, &beta, y, 1)); +} + +template <> +void caffe_axpy(const int N, const float alpha, const float* X, + float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); } + +template <> +void caffe_axpy(const int N, const double alpha, const double* X, + double* Y) { cblas_daxpy(N, alpha, X, 1, Y, 1); } + +template <> +void caffe_gpu_axpy(const int N, const float alpha, const float* X, + float* Y) { + CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); +} + +template <> +void caffe_gpu_axpy(const int N, const double alpha, const double* X, + double* Y) { + CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); +} + +template +void caffe_set(const int N, const Dtype alpha, Dtype* Y) { + if (alpha == 0) { + memset(Y, 0, sizeof(Dtype) * N); + return; + } + for (int i = 0; i < N; ++i) { + Y[i] = alpha; + } +} + +template void caffe_set(const int N, const int alpha, int* Y); +template void caffe_set(const int N, const float alpha, float* Y); +template void caffe_set(const int N, const double alpha, double* Y); + +template <> +void caffe_add_scalar(const int N, const float alpha, float* Y) { + for (int i = 0; i < N; ++i) { + Y[i] += alpha; + } +} + +template <> +void caffe_add_scalar(const int N, const double alpha, double* Y) { + for (int i = 0; i < N; ++i) { + Y[i] += alpha; + } +} + +template <> +void caffe_copy(const int N, const float* X, float* Y) { + cblas_scopy(N, X, 1, Y, 1); +} + +template <> +void caffe_copy(const int N, const double* X, double* Y) { + cblas_dcopy(N, X, 1, Y, 1); +} + +template <> +void caffe_gpu_copy(const int N, const float* X, float* Y) { + CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), N, X, 1, Y, 1)); +} + +template <> +void caffe_gpu_copy(const int N, const double* X, double* Y) { + CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), N, X, 1, Y, 1)); +} + +template <> +void caffe_scal(const int N, const float alpha, float *X) { + cblas_sscal(N, alpha, X, 1); +} + +template <> +void caffe_scal(const int N, const double alpha, double *X) { + cblas_dscal(N, alpha, X, 1); +} + +template <> +void caffe_gpu_scal(const int N, const float alpha, float *X) { + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); +} + +template <> +void caffe_gpu_scal(const int N, const double alpha, double *X) { + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); +} + +template <> +void caffe_gpu_axpby(const int N, const float alpha, const float* X, + const float beta, float* Y) { + caffe_gpu_scal(N, beta, Y); + caffe_gpu_axpy(N, alpha, X, Y); +} + +template <> +void caffe_gpu_axpby(const int N, const double alpha, const double* X, + const double beta, double* Y) { + caffe_gpu_scal(N, beta, Y); + caffe_gpu_axpy(N, alpha, X, Y); +} + +template <> +void caffe_cpu_axpby(const int N, const float alpha, const float* X, + const float beta, float* Y) { + cblas_saxpby(N, alpha, X, 1, beta, Y, 1); +} + +template <> +void caffe_cpu_axpby(const int N, const double alpha, const double* X, + const double beta, double* Y) { + cblas_daxpby(N, alpha, X, 1, beta, Y, 1); +} + +template <> +void caffe_add(const int n, const float* a, const float* b, + float* y) { + vsAdd(n, a, b, y); +} + +template <> +void caffe_add(const int n, const double* a, const double* b, + double* y) { + vdAdd(n, a, b, y); +} + +template <> +void caffe_sub(const int n, const float* a, const float* b, + float* y) { + vsSub(n, a, b, y); +} + +template <> +void 
caffe_sub(const int n, const double* a, const double* b, + double* y) { + vdSub(n, a, b, y); +} + +template <> +void caffe_mul(const int n, const float* a, const float* b, + float* y) { + vsMul(n, a, b, y); +} + +template <> +void caffe_mul(const int n, const double* a, const double* b, + double* y) { + vdMul(n, a, b, y); +} + +template <> +void caffe_div(const int n, const float* a, const float* b, + float* y) { + vsDiv(n, a, b, y); +} + +template <> +void caffe_div(const int n, const double* a, const double* b, + double* y) { + vdDiv(n, a, b, y); +} + +template <> +void caffe_powx(const int n, const float* a, const float b, + float* y) { + vsPowx(n, a, b, y); +} + +template <> +void caffe_powx(const int n, const double* a, const double b, + double* y) { + vdPowx(n, a, b, y); +} + +template <> +void caffe_sqr(const int n, const float* a, float* y) { + vsSqr(n, a, y); +} + +template <> +void caffe_sqr(const int n, const double* a, double* y) { + vdSqr(n, a, y); +} + +template <> +void caffe_exp(const int n, const float* a, float* y) { + vsExp(n, a, y); +} + +template <> +void caffe_exp(const int n, const double* a, double* y) { + vdExp(n, a, y); +} + +unsigned int caffe_rng_rand() { + return (*caffe_rng())(); +} + +template +Dtype caffe_nextafter(const Dtype b) { + return boost::math::nextafter( + b, std::numeric_limits::max()); +} + +template +float caffe_nextafter(const float b); + +template +double caffe_nextafter(const double b); + +template +void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_LE(a, b); + boost::uniform_real random_distribution(a, caffe_nextafter(b)); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_uniform(const int n, const float a, const float b, + float* r); + +template +void caffe_rng_uniform(const int n, const double a, const double b, + double* r); + +template +void caffe_rng_gaussian(const int n, const Dtype a, + const Dtype sigma, Dtype* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GT(sigma, 0); + boost::normal_distribution random_distribution(a, sigma); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_gaussian(const int n, const float mu, + const float sigma, float* r); + +template +void caffe_rng_gaussian(const int n, const double mu, + const double sigma, double* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GE(p, 0); + CHECK_LE(p, 1); + boost::bernoulli_distribution random_distribution(p); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = variate_generator(); + } +} + +template +void caffe_rng_bernoulli(const int n, const double p, int* r); + +template +void caffe_rng_bernoulli(const int n, const float p, int* r); + +template +void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) { + CHECK_GE(n, 0); + CHECK(r); + CHECK_GE(p, 0); + CHECK_LE(p, 1); + boost::bernoulli_distribution random_distribution(p); + boost::variate_generator > + variate_generator(caffe_rng(), random_distribution); + for (int i = 0; i < n; ++i) { + r[i] = static_cast(variate_generator()); + } +} + +template +void caffe_rng_bernoulli(const int n, const double p, unsigned int* r); + +template +void 
caffe_rng_bernoulli(const int n, const float p, unsigned int* r); + +template <> +float caffe_cpu_dot(const int n, const float* x, const float* y) { + return cblas_sdot(n, x, 1, y, 1); +} + +template <> +double caffe_cpu_dot(const int n, const double* x, const double* y) { + return cblas_ddot(n, x, 1, y, 1); +} + +template <> +void caffe_gpu_dot(const int n, const float* x, const float* y, + float* out) { + CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); +} + +template <> +void caffe_gpu_dot(const int n, const double* x, const double* y, + double * out) { + CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); +} + +template <> +int caffe_cpu_hamming_distance(const int n, const float* x, + const float* y) { + int dist = 0; + for (int i = 0; i < n; ++i) { + dist += __builtin_popcount(static_cast(x[i]) ^ + static_cast(y[i])); + } + return dist; +} + +template <> +int caffe_cpu_hamming_distance(const int n, const double* x, + const double* y) { + int dist = 0; + for (int i = 0; i < n; ++i) { + dist += __builtin_popcountl(static_cast(x[i]) ^ + static_cast(y[i])); + } + return dist; +} + +template <> +float caffe_cpu_asum(const int n, const float* x) { + return cblas_sasum(n, x, 1); +} + +template <> +double caffe_cpu_asum(const int n, const double* x) { + return cblas_dasum(n, x, 1); +} + +template <> +void caffe_gpu_asum(const int n, const float* x, float* y) { + CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); +} + +template <> +void caffe_gpu_asum(const int n, const double* x, double* y) { + CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); +} + +INSTANTIATE_CAFFE_CPU_UNARY_FUNC(sign); +INSTANTIATE_CAFFE_CPU_UNARY_FUNC(sgnbit); +INSTANTIATE_CAFFE_CPU_UNARY_FUNC(fabs); + +template <> +void caffe_cpu_scale(const int n, const float alpha, const float *x, + float* y) { + cblas_scopy(n, x, 1, y, 1); + cblas_sscal(n, alpha, y, 1); +} + +template <> +void caffe_cpu_scale(const int n, const double alpha, const double *x, + double* y) { + cblas_dcopy(n, x, 1, y, 1); + cblas_dscal(n, alpha, y, 1); +} + +template <> +void caffe_gpu_scale(const int n, const float alpha, const float *x, + float* y) { + CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); +} + +template <> +void caffe_gpu_scale(const int n, const double alpha, const double *x, + double* y) { + CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cu b/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cu new file mode 100644 index 000000000..63c8fac69 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/math_functions.cu @@ -0,0 +1,280 @@ +// Copyright 2014 BVLC and contributors. 
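The `caffe_gpu_gemm` wrappers in math_functions.cpp above hand their operands to cuBLAS in swapped order because cuBLAS assumes column-major (Fortran) storage: a row-major matrix reinterpreted as column-major is its transpose, so computing C^T = B^T * A^T in column-major order leaves C laid out row-major. The standalone sketch below checks that identity with a naive column-major GEMM; the matrix sizes and values are arbitrary test data, not Caffe code.

    #include <cassert>
    #include <cmath>
    #include <vector>

    // Naive column-major GEMM: C (m x n) = A (m x k) * B (k x n),
    // each matrix stored column-major with leading dimension equal to its rows.
    void gemm_colmajor(int m, int n, int k, const std::vector<double>& A,
                       const std::vector<double>& B, std::vector<double>* C) {
      for (int j = 0; j < n; ++j) {
        for (int i = 0; i < m; ++i) {
          double sum = 0.0;
          for (int p = 0; p < k; ++p) {
            sum += A[i + p * m] * B[p + j * k];
          }
          (*C)[i + j * m] = sum;
        }
      }
    }

    int main() {
      const int M = 2, N = 3, K = 4;
      // Row-major inputs: A is M x K, B is K x N (arbitrary values).
      std::vector<double> A(M * K), B(K * N);
      for (int i = 0; i < M * K; ++i) A[i] = i + 1;
      for (int i = 0; i < K * N; ++i) B[i] = 0.5 * i;

      // Reference: row-major C = A * B.
      std::vector<double> C_ref(M * N, 0.0);
      for (int i = 0; i < M; ++i)
        for (int j = 0; j < N; ++j)
          for (int p = 0; p < K; ++p)
            C_ref[i * N + j] += A[i * K + p] * B[p * N + j];

      // Swapped call, as in caffe_gpu_gemm: the row-major A buffer read
      // column-major with leading dimension K is A^T, and likewise B is B^T,
      // so this computes C^T (N x M) = B^T * A^T. Stored column-major, C^T is
      // exactly the row-major layout of C.
      std::vector<double> C_rowmajor(M * N, 0.0);
      gemm_colmajor(N, M, K, B, A, &C_rowmajor);

      for (int i = 0; i < M * N; ++i) {
        assert(std::fabs(C_rowmajor[i] - C_ref[i]) < 1e-12);
      }
      return 0;
    }

The same reasoning is why the wrappers above pass `cuTransB` before `cuTransA` and give the dimensions as N, M, K when calling `cublasSgemm`/`cublasDgemm`.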
+ +#include // CUDA's, not caffe's, for fabs, signbit +#include +#include // thrust::plus +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = alpha; + } +} + +template <> +void caffe_gpu_set(const int N, const float alpha, float* Y) { + if (alpha == 0) { + CUDA_CHECK(cudaMemset(Y, 0, sizeof(float) * N)); + return; + } + // NOLINT_NEXT_LINE(whitespace/operators) + set_kernel<<>>( + N, alpha, Y); +} + +template <> +void caffe_gpu_set(const int N, const double alpha, double* Y) { + if (alpha == 0) { + CUDA_CHECK(cudaMemset(Y, 0, sizeof(double) * N)); + return; + } + // NOLINT_NEXT_LINE(whitespace/operators) + set_kernel<<>>( + N, alpha, Y); +} + +template +__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] += alpha; + } +} + +template <> +void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); +} + +template <> +void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_scalar_kernel<<>>( + N, alpha, Y); +} + +template +__global__ void add_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] + b[index]; + } +} + +template <> +void caffe_gpu_add(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_add(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + add_kernel<<>>( + N, a, b, y); +} + +template +__global__ void sub_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] - b[index]; + } +} + +template <> +void caffe_gpu_sub(const int N, const float* a, const float* b, + float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_sub(const int N, const double* a, const double* b, + double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sub_kernel<<>>( + N, a, b, y); +} + +template +__global__ void mul_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] * b[index]; + } +} + +template <> +void caffe_gpu_mul(const int N, const float* a, + const float* b, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_mul(const int N, const double* a, + const double* b, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + mul_kernel<<>>( + N, a, b, y); +} + +template +__global__ void div_kernel(const int n, const Dtype* a, + const Dtype* b, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = a[index] / b[index]; + } +} + +template <> +void caffe_gpu_div(const int N, const float* a, + const float* b, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); +} + +template <> +void caffe_gpu_div(const int N, const double* a, + const double* b, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + div_kernel<<>>( + N, a, b, y); +} + +template +__global__ void powx_kernel(const int n, const Dtype* a, + const Dtype alpha, Dtype* y) { + 
CUDA_KERNEL_LOOP(index, n) { + y[index] = pow(a[index], alpha); + } +} + +template <> +void caffe_gpu_powx(const int N, const float* a, + const float alpha, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); +} + +template <> +void caffe_gpu_powx(const int N, const double* a, + const double alpha, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + powx_kernel<<>>( + N, a, alpha, y); +} + +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) + - (x[index] < Dtype(0))); +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); +DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(fabs, y[index] = fabs(x[index])); + +__global__ void popc_kernel(const int n, const float* a, + const float* b, uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = __popc(static_cast(a[index]) ^ + static_cast(b[index])); + } +} + +__global__ void popcll_kernel(const int n, const double* a, + const double* b, uint8_t* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = __popcll(static_cast(a[index]) ^ + static_cast(b[index])); + } +} + +template <> +uint32_t caffe_gpu_hamming_distance(const int n, const float* x, + const float* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). + NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popc_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), + (uint32_t) 0, thrust::plus()); +} + +template <> +uint32_t caffe_gpu_hamming_distance(const int n, const double* x, + const double* y) { + // TODO: Fix caffe_gpu_hamming_distance (see failing unit test + // TestHammingDistanceGPU in test_math_functions.cpp). 
+ NOT_IMPLEMENTED; + thrust::device_vector popcounts(n); + // NOLINT_NEXT_LINE(whitespace/operators) + popcll_kernel<<>>( + n, x, y, thrust::raw_pointer_cast(popcounts.data())); + return thrust::reduce(popcounts.begin(), popcounts.end(), + /* NOLINT_NEXT_LINE(build/include_what_you_use) */ + (uint32_t) 0, thrust::plus()); +} + +void caffe_gpu_rng_uniform(const int n, unsigned int* r) { + CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); +} + +template <> +void caffe_gpu_rng_uniform(const int n, const float a, const float b, + float* r) { + CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); + const float range = b - a; + if (range != static_cast(1)) { + caffe_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + caffe_gpu_add_scalar(n, a, r); + } +} + +template <> +void caffe_gpu_rng_uniform(const int n, const double a, const double b, + double* r) { + CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); + const double range = b - a; + if (range != static_cast(1)) { + caffe_gpu_scal(n, range, r); + } + if (a != static_cast(0)) { + caffe_gpu_add_scalar(n, a, r); + } +} + +template <> +void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, + float* r) { + CURAND_CHECK( + curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); +} + +template <> +void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, + double* r) { + CURAND_CHECK( + curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/caffe/util/upgrade_proto.cpp b/modules/dnns_easily_fooled/caffe/src/caffe/util/upgrade_proto.cpp new file mode 100644 index 000000000..e079b422d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/caffe/util/upgrade_proto.cpp @@ -0,0 +1,615 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include +#include + +#include "caffe/common.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" +#include "caffe/proto/caffe.pb.h" + +using std::map; +using std::string; + +namespace caffe { + +bool NetNeedsUpgrade(const NetParameter& net_param) { + for (int i = 0; i < net_param.layers_size(); ++i) { + if (net_param.layers(i).has_layer()) { + return true; + } + } + return false; +} + +bool UpgradeV0Net(const NetParameter& v0_net_param_padding_layers, + NetParameter* net_param) { + // First upgrade padding layers to padded conv layers. + NetParameter v0_net_param; + UpgradeV0PaddingLayers(v0_net_param_padding_layers, &v0_net_param); + // Now upgrade layer parameters. + bool is_fully_compatible = true; + net_param->Clear(); + if (v0_net_param.has_name()) { + net_param->set_name(v0_net_param.name()); + } + for (int i = 0; i < v0_net_param.layers_size(); ++i) { + is_fully_compatible &= UpgradeLayerParameter(v0_net_param.layers(i), + net_param->add_layers()); + } + for (int i = 0; i < v0_net_param.input_size(); ++i) { + net_param->add_input(v0_net_param.input(i)); + } + for (int i = 0; i < v0_net_param.input_dim_size(); ++i) { + net_param->add_input_dim(v0_net_param.input_dim(i)); + } + if (v0_net_param.has_force_backward()) { + net_param->set_force_backward(v0_net_param.force_backward()); + } + return is_fully_compatible; +} + +void UpgradeV0PaddingLayers(const NetParameter& param, + NetParameter* param_upgraded_pad) { + // Copy everything other than the layers from the original param. 
+ param_upgraded_pad->Clear(); + param_upgraded_pad->CopyFrom(param); + param_upgraded_pad->clear_layers(); + // Figure out which layer each bottom blob comes from. + map blob_name_to_last_top_idx; + for (int i = 0; i < param.input_size(); ++i) { + const string& blob_name = param.input(i); + blob_name_to_last_top_idx[blob_name] = -1; + } + for (int i = 0; i < param.layers_size(); ++i) { + const LayerParameter& layer_connection = param.layers(i); + const V0LayerParameter& layer_param = layer_connection.layer(); + // Add the layer to the new net, unless it's a padding layer. + if (layer_param.type() != "padding") { + param_upgraded_pad->add_layers()->CopyFrom(layer_connection); + } + for (int j = 0; j < layer_connection.bottom_size(); ++j) { + const string& blob_name = layer_connection.bottom(j); + if (blob_name_to_last_top_idx.find(blob_name) == + blob_name_to_last_top_idx.end()) { + LOG(FATAL) << "Unknown blob input " << blob_name << " to layer " << j; + } + const int top_idx = blob_name_to_last_top_idx[blob_name]; + if (top_idx == -1) { + continue; + } + LayerParameter source_layer = param.layers(top_idx); + if (source_layer.layer().type() == "padding") { + // This layer has a padding layer as input -- check that it is a conv + // layer and takes only one input. Also check that the padding layer + // input has only one input and one output. Other cases have undefined + // behavior in Caffe. + CHECK_EQ(layer_param.type(), "conv") << "Padding layer input to " + "non-convolutional layer type " << layer_param.type(); + CHECK_EQ(layer_connection.bottom_size(), 1) + << "Conv Layer takes a single blob as input."; + CHECK_EQ(source_layer.bottom_size(), 1) + << "Padding Layer takes a single blob as input."; + CHECK_EQ(source_layer.top_size(), 1) + << "Padding Layer produces a single blob as output."; + int layer_index = param_upgraded_pad->layers_size() - 1; + param_upgraded_pad->mutable_layers(layer_index)->mutable_layer() + ->set_pad(source_layer.layer().pad()); + param_upgraded_pad->mutable_layers(layer_index) + ->set_bottom(j, source_layer.bottom(0)); + } + } + for (int j = 0; j < layer_connection.top_size(); ++j) { + const string& blob_name = layer_connection.top(j); + blob_name_to_last_top_idx[blob_name] = i; + } + } +} + +bool UpgradeLayerParameter(const LayerParameter& v0_layer_connection, + LayerParameter* layer_param) { + bool is_fully_compatible = true; + layer_param->Clear(); + for (int i = 0; i < v0_layer_connection.bottom_size(); ++i) { + layer_param->add_bottom(v0_layer_connection.bottom(i)); + } + for (int i = 0; i < v0_layer_connection.top_size(); ++i) { + layer_param->add_top(v0_layer_connection.top(i)); + } + if (v0_layer_connection.has_layer()) { + const V0LayerParameter& v0_layer_param = v0_layer_connection.layer(); + if (v0_layer_param.has_name()) { + layer_param->set_name(v0_layer_param.name()); + } + const string& type = v0_layer_param.type(); + if (v0_layer_param.has_type()) { + layer_param->set_type(UpgradeV0LayerType(type)); + } + for (int i = 0; i < v0_layer_param.blobs_size(); ++i) { + layer_param->add_blobs()->CopyFrom(v0_layer_param.blobs(i)); + } + for (int i = 0; i < v0_layer_param.blobs_lr_size(); ++i) { + layer_param->add_blobs_lr(v0_layer_param.blobs_lr(i)); + } + for (int i = 0; i < v0_layer_param.weight_decay_size(); ++i) { + layer_param->add_weight_decay(v0_layer_param.weight_decay(i)); + } + if (v0_layer_param.has_num_output()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_num_output( + v0_layer_param.num_output()); + } else if 
(type == "innerproduct") { + layer_param->mutable_inner_product_param()->set_num_output( + v0_layer_param.num_output()); + } else { + LOG(ERROR) << "Unknown parameter num_output for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_biasterm()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_bias_term( + v0_layer_param.biasterm()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()->set_bias_term( + v0_layer_param.biasterm()); + } else { + LOG(ERROR) << "Unknown parameter biasterm for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_weight_filler()) { + if (type == "conv") { + layer_param->mutable_convolution_param()-> + mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()-> + mutable_weight_filler()->CopyFrom(v0_layer_param.weight_filler()); + } else { + LOG(ERROR) << "Unknown parameter weight_filler for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_bias_filler()) { + if (type == "conv") { + layer_param->mutable_convolution_param()-> + mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + } else if (type == "innerproduct") { + layer_param->mutable_inner_product_param()-> + mutable_bias_filler()->CopyFrom(v0_layer_param.bias_filler()); + } else { + LOG(ERROR) << "Unknown parameter bias_filler for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_pad()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_pad(v0_layer_param.pad()); + } else { + LOG(ERROR) << "Unknown parameter pad for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_kernelsize()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_kernel_size( + v0_layer_param.kernelsize()); + } else if (type == "pool") { + layer_param->mutable_pooling_param()->set_kernel_size( + v0_layer_param.kernelsize()); + } else { + LOG(ERROR) << "Unknown parameter kernelsize for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_group()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_group( + v0_layer_param.group()); + } else { + LOG(ERROR) << "Unknown parameter group for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_stride()) { + if (type == "conv") { + layer_param->mutable_convolution_param()->set_stride( + v0_layer_param.stride()); + } else if (type == "pool") { + layer_param->mutable_pooling_param()->set_stride( + v0_layer_param.stride()); + } else { + LOG(ERROR) << "Unknown parameter stride for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_pool()) { + if (type == "pool") { + V0LayerParameter_PoolMethod pool = v0_layer_param.pool(); + switch (pool) { + case V0LayerParameter_PoolMethod_MAX: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_MAX); + break; + case V0LayerParameter_PoolMethod_AVE: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_AVE); + break; + case V0LayerParameter_PoolMethod_STOCHASTIC: + layer_param->mutable_pooling_param()->set_pool( + PoolingParameter_PoolMethod_STOCHASTIC); + break; + default: + LOG(ERROR) << "Unknown pool method " << pool; + is_fully_compatible = false; + } + } else { + LOG(ERROR) << "Unknown parameter pool for layer type 
" << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_dropout_ratio()) { + if (type == "dropout") { + layer_param->mutable_dropout_param()->set_dropout_ratio( + v0_layer_param.dropout_ratio()); + } else { + LOG(ERROR) << "Unknown parameter dropout_ratio for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_local_size()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_local_size( + v0_layer_param.local_size()); + } else { + LOG(ERROR) << "Unknown parameter local_size for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_alpha()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_alpha(v0_layer_param.alpha()); + } else { + LOG(ERROR) << "Unknown parameter alpha for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_beta()) { + if (type == "lrn") { + layer_param->mutable_lrn_param()->set_beta(v0_layer_param.beta()); + } else { + LOG(ERROR) << "Unknown parameter beta for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_source()) { + if (type == "data") { + layer_param->mutable_data_param()->set_source(v0_layer_param.source()); + } else if (type == "hdf5_data") { + layer_param->mutable_hdf5_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_source( + v0_layer_param.source()); + } else if (type == "infogain_loss") { + layer_param->mutable_infogain_loss_param()->set_source( + v0_layer_param.source()); + } else { + LOG(ERROR) << "Unknown parameter source for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_scale()) { + if (type == "data") { + layer_param->mutable_data_param()->set_scale(v0_layer_param.scale()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_scale( + v0_layer_param.scale()); + } else { + LOG(ERROR) << "Unknown parameter scale for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_meanfile()) { + if (type == "data") { + layer_param->mutable_data_param()->set_mean_file( + v0_layer_param.meanfile()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_mean_file( + v0_layer_param.meanfile()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_mean_file( + v0_layer_param.meanfile()); + } else { + LOG(ERROR) << "Unknown parameter meanfile for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_batchsize()) { + if (type == "data") { + layer_param->mutable_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "hdf5_data") { + layer_param->mutable_hdf5_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_batch_size( + v0_layer_param.batchsize()); + } else { + LOG(ERROR) << "Unknown parameter batchsize for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_cropsize()) { + if (type == "data") { + layer_param->mutable_data_param()->set_crop_size( + v0_layer_param.cropsize()); + } else if (type == "images") { + 
layer_param->mutable_image_data_param()->set_crop_size( + v0_layer_param.cropsize()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_crop_size( + v0_layer_param.cropsize()); + } else { + LOG(ERROR) << "Unknown parameter cropsize for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_mirror()) { + if (type == "data") { + layer_param->mutable_data_param()->set_mirror(v0_layer_param.mirror()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_mirror( + v0_layer_param.mirror()); + } else if (type == "window_data") { + layer_param->mutable_window_data_param()->set_mirror( + v0_layer_param.mirror()); + } else { + LOG(ERROR) << "Unknown parameter mirror for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_rand_skip()) { + if (type == "data") { + layer_param->mutable_data_param()->set_rand_skip( + v0_layer_param.rand_skip()); + } else if (type == "images") { + layer_param->mutable_image_data_param()->set_rand_skip( + v0_layer_param.rand_skip()); + } else { + LOG(ERROR) << "Unknown parameter rand_skip for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_shuffle_images()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_shuffle( + v0_layer_param.shuffle_images()); + } else { + LOG(ERROR) << "Unknown parameter shuffle for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_new_height()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_new_height( + v0_layer_param.new_height()); + } else { + LOG(ERROR) << "Unknown parameter new_height for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_new_width()) { + if (type == "images") { + layer_param->mutable_image_data_param()->set_new_width( + v0_layer_param.new_width()); + } else { + LOG(ERROR) << "Unknown parameter new_width for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_concat_dim()) { + if (type == "concat") { + layer_param->mutable_concat_param()->set_concat_dim( + v0_layer_param.concat_dim()); + } else { + LOG(ERROR) << "Unknown parameter concat_dim for layer type " << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_fg_threshold()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_fg_threshold( + v0_layer_param.det_fg_threshold()); + } else { + LOG(ERROR) << "Unknown parameter det_fg_threshold for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_bg_threshold()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_bg_threshold( + v0_layer_param.det_bg_threshold()); + } else { + LOG(ERROR) << "Unknown parameter det_bg_threshold for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_fg_fraction()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_fg_fraction( + v0_layer_param.det_fg_fraction()); + } else { + LOG(ERROR) << "Unknown parameter det_fg_fraction for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_det_context_pad()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_context_pad( + v0_layer_param.det_context_pad()); + } else { + LOG(ERROR) << "Unknown parameter det_context_pad for layer type " + << type; + is_fully_compatible = false; + } + } + 
if (v0_layer_param.has_det_crop_mode()) { + if (type == "window_data") { + layer_param->mutable_window_data_param()->set_crop_mode( + v0_layer_param.det_crop_mode()); + } else { + LOG(ERROR) << "Unknown parameter det_crop_mode for layer type " + << type; + is_fully_compatible = false; + } + } + if (v0_layer_param.has_hdf5_output_param()) { + if (type == "hdf5_output") { + layer_param->mutable_hdf5_output_param()->CopyFrom( + v0_layer_param.hdf5_output_param()); + } else { + LOG(ERROR) << "Unknown parameter hdf5_output_param for layer type " + << type; + is_fully_compatible = false; + } + } + } + return is_fully_compatible; +} + +LayerParameter_LayerType UpgradeV0LayerType(const string& type) { + if (type == "accuracy") { + return LayerParameter_LayerType_ACCURACY; + } else if (type == "bnll") { + return LayerParameter_LayerType_BNLL; + } else if (type == "concat") { + return LayerParameter_LayerType_CONCAT; + } else if (type == "conv") { + return LayerParameter_LayerType_CONVOLUTION; + } else if (type == "data") { + return LayerParameter_LayerType_DATA; + } else if (type == "dropout") { + return LayerParameter_LayerType_DROPOUT; + } else if (type == "euclidean_loss") { + return LayerParameter_LayerType_EUCLIDEAN_LOSS; + } else if (type == "flatten") { + return LayerParameter_LayerType_FLATTEN; + } else if (type == "hdf5_data") { + return LayerParameter_LayerType_HDF5_DATA; + } else if (type == "hdf5_output") { + return LayerParameter_LayerType_HDF5_OUTPUT; + } else if (type == "im2col") { + return LayerParameter_LayerType_IM2COL; + } else if (type == "images") { + return LayerParameter_LayerType_IMAGE_DATA; + } else if (type == "infogain_loss") { + return LayerParameter_LayerType_INFOGAIN_LOSS; + } else if (type == "innerproduct") { + return LayerParameter_LayerType_INNER_PRODUCT; + } else if (type == "lrn") { + return LayerParameter_LayerType_LRN; + } else if (type == "multinomial_logistic_loss") { + return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS; + } else if (type == "pool") { + return LayerParameter_LayerType_POOLING; + } else if (type == "relu") { + return LayerParameter_LayerType_RELU; + } else if (type == "sigmoid") { + return LayerParameter_LayerType_SIGMOID; + } else if (type == "softmax") { + return LayerParameter_LayerType_SOFTMAX; + } else if (type == "softmax_loss") { + return LayerParameter_LayerType_SOFTMAX_LOSS; + } else if (type == "split") { + return LayerParameter_LayerType_SPLIT; + } else if (type == "tanh") { + return LayerParameter_LayerType_TANH; + } else if (type == "window_data") { + return LayerParameter_LayerType_WINDOW_DATA; + } else { + LOG(FATAL) << "Unknown layer name: " << type; + return LayerParameter_LayerType_NONE; + } +} + +void NetParameterToPrettyPrint(const NetParameter& param, + NetParameterPrettyPrint* pretty_param) { + pretty_param->Clear(); + if (param.has_name()) { + pretty_param->set_name(param.name()); + } + if (param.has_force_backward()) { + pretty_param->set_force_backward(param.force_backward()); + } + for (int i = 0; i < param.input_size(); ++i) { + pretty_param->add_input(param.input(i)); + } + for (int i = 0; i < param.input_dim_size(); ++i) { + pretty_param->add_input_dim(param.input_dim(i)); + } + for (int i = 0; i < param.layers_size(); ++i) { + pretty_param->add_layers()->CopyFrom(param.layers(i)); + } +} + +void ReadNetParamsFromTextFileOrDie(const string& param_file, + NetParameter* param) { + CHECK(ReadProtoFromTextFile(param_file, param)) + << "Failed to parse NetParameter file: " << param_file; + if 
(NetNeedsUpgrade(*param)) { + // NetParameter was specified using the old style (V0LayerParameter); try to + // upgrade it. + LOG(ERROR) << "Attempting to upgrade input file specified using deprecated " + << "V0LayerParameter: " << param_file; + NetParameter original_param(*param); + if (!UpgradeV0Net(original_param, param)) { + LOG(ERROR) << "Warning: had one or more problems upgrading " + << "V0NetParameter to NetParameter (see above); continuing anyway."; + } else { + LOG(INFO) << "Successfully upgraded file specified using deprecated " + << "V0LayerParameter"; + } + LOG(ERROR) << "Note that future Caffe releases will not support " + << "V0NetParameter; use ./build/tools/upgrade_net_proto_text.bin to " + << "upgrade this and any other network proto files to the new format."; + } +} + +void ReadNetParamsFromBinaryFileOrDie(const string& param_file, + NetParameter* param) { + CHECK(ReadProtoFromBinaryFile(param_file, param)) + << "Failed to parse NetParameter file: " << param_file; + if (NetNeedsUpgrade(*param)) { + // NetParameter was specified using the old style (V0LayerParameter); try to + // upgrade it. + LOG(ERROR) << "Attempting to upgrade input file specified using deprecated " + << "V0LayerParameter: " << param_file; + NetParameter original_param(*param); + if (!UpgradeV0Net(original_param, param)) { + LOG(ERROR) << "Warning: had one or more problems upgrading " + << "V0NetParameter to NetParameter (see above); continuing anyway."; + } else { + LOG(INFO) << "Successfully upgraded file specified using deprecated " + << "V0LayerParameter"; + } + LOG(ERROR) << "Note that future Caffe releases will not support " + << "V0NetParameter; use ./build/tools/upgrade_net_proto_binary.bin to " + << "upgrade this and any other network proto files to the new format."; + } +} + +} // namespace caffe diff --git a/modules/dnns_easily_fooled/caffe/src/gtest/gtest-all.cpp b/modules/dnns_easily_fooled/caffe/src/gtest/gtest-all.cpp new file mode 100644 index 000000000..5ced66a90 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/gtest/gtest-all.cpp @@ -0,0 +1,9118 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// Google C++ Testing Framework (Google Test) +// +// Sometimes it's desirable to build Google Test by compiling a single file. +// This file serves this purpose. + +// This line ensures that gtest.h can be compiled on its own, even +// when it's fused. +#include "gtest/gtest.h" + +// The following lines pull in the real gtest *.cc files. +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Utilities for testing Google Test itself and code that uses Google Test +// (e.g. frameworks built on top of Google Test). + +#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_ +#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + + +namespace testing { + +// This helper class can be used to mock out Google Test failure reporting +// so that we can test Google Test or code that builds on Google Test. +// +// An object of this class appends a TestPartResult object to the +// TestPartResultArray object given in the constructor whenever a Google Test +// failure is reported. It can either intercept only failures that are +// generated in the same thread that created this object or it can intercept +// all generated failures. The scope of this mock object can be controlled with +// the second argument to the two arguments constructor. +class GTEST_API_ ScopedFakeTestPartResultReporter + : public TestPartResultReporterInterface { + public: + // The two possible mocking modes of this object. + enum InterceptMode { + INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. + INTERCEPT_ALL_THREADS // Intercepts all failures. + }; + + // The c'tor sets this object as the test part result reporter used + // by Google Test. The 'result' parameter specifies where to report the + // results. This reporter will only catch failures generated in the current + // thread. DEPRECATED + explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); + + // Same as above, but you can choose the interception scope of this object. + ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, + TestPartResultArray* result); + + // The d'tor restores the previous test part result reporter. + virtual ~ScopedFakeTestPartResultReporter(); + + // Appends the TestPartResult object to the TestPartResultArray + // received in the constructor. + // + // This method is from the TestPartResultReporterInterface + // interface. + virtual void ReportTestPartResult(const TestPartResult& result); + private: + void Init(); + + const InterceptMode intercept_mode_; + TestPartResultReporterInterface* old_reporter_; + TestPartResultArray* const result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); +}; + +namespace internal { + +// A helper class for implementing EXPECT_FATAL_FAILURE() and +// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +class GTEST_API_ SingleFailureChecker { + public: + // The constructor remembers the arguments. 
+ SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr); + ~SingleFailureChecker(); + private: + const TestPartResultArray* const results_; + const TestPartResult::Type type_; + const string substr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); +}; + +} // namespace internal + +} // namespace testing + +// A set of macros for testing Google Test assertions or code that's expected +// to generate Google Test fatal failures. It verifies that the given +// statement will cause exactly one fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_FATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - 'statement' cannot reference local non-static variables or +// non-static members of the current object. +// - 'statement' cannot return a value. +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. The AcceptsMacroThatExpandsToUnprotectedComma test in +// gtest_unittest.cc will fail to compile if we do that. +#define EXPECT_FATAL_FAILURE(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ALL_THREADS, >est_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +// A macro for testing Google Test assertions or code that's expected to +// generate Google Test non-fatal failures. It asserts that the given +// statement will cause exactly one non-fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// 'statement' is allowed to reference local variables and members of +// the current object. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. 
+//
+// Known restrictions:
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+//   if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
+          &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h>  // NOLINT
+# include <limits.h>  // NOLINT
+# include <sched.h>  // NOLINT
+// Declares vsnprintf(). This header is not available on Windows.
+# include <strings.h>  // NOLINT
+# include <sys/mman.h>  // NOLINT
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
+
+# include <windows.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS  // We are on Windows proper.
+
+# include <io.h>  // NOLINT
+# include <sys/timeb.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+# include <sys/stat.h>  // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
+// supports these. consider using them instead.
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+# endif  // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h>  // NOLINT
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation. Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// A user is trying to include this from his code - just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif  // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif  // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.
+#include <string.h>  // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
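+//
+// Editorial sketch, not part of the upstream Google Test sources: a flag
+// declared with GTEST_DECLARE_bool_ is read and written through the
+// GTEST_FLAG() accessor, e.g.
+//
+//   const bool saved_use_fork = GTEST_FLAG(death_test_use_fork);
+//   GTEST_FLAG(death_test_use_fork) = true;  // force fork()-based death tests
+//   // ... run the code under test ...
+//   GTEST_FLAG(death_test_use_fork) = saved_use_fork;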
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+    const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+  const unsigned int raw_seed = (random_seed_flag == 0) ?
+      static_cast<unsigned int>(GetTimeInMillis()) :
+      static_cast<unsigned int>(random_seed_flag);
+
+  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+  // it's easy to type.
+  const int normalized_seed =
+      static_cast<int>((raw_seed - 1U) %
+                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+  return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+      << "Invalid random seed " << seed << " - must be in [1, "
+      << kMaxRandomSeed << "].";
+  const int next_seed = seed + 1;
+  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+  // The c'tor.
+ GTestFlagSaver() { + also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests); + break_on_failure_ = GTEST_FLAG(break_on_failure); + catch_exceptions_ = GTEST_FLAG(catch_exceptions); + color_ = GTEST_FLAG(color); + death_test_style_ = GTEST_FLAG(death_test_style); + death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); + filter_ = GTEST_FLAG(filter); + internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); + list_tests_ = GTEST_FLAG(list_tests); + output_ = GTEST_FLAG(output); + print_time_ = GTEST_FLAG(print_time); + random_seed_ = GTEST_FLAG(random_seed); + repeat_ = GTEST_FLAG(repeat); + shuffle_ = GTEST_FLAG(shuffle); + stack_trace_depth_ = GTEST_FLAG(stack_trace_depth); + stream_result_to_ = GTEST_FLAG(stream_result_to); + throw_on_failure_ = GTEST_FLAG(throw_on_failure); + } + + // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS. + ~GTestFlagSaver() { + GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_; + GTEST_FLAG(break_on_failure) = break_on_failure_; + GTEST_FLAG(catch_exceptions) = catch_exceptions_; + GTEST_FLAG(color) = color_; + GTEST_FLAG(death_test_style) = death_test_style_; + GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; + GTEST_FLAG(filter) = filter_; + GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; + GTEST_FLAG(list_tests) = list_tests_; + GTEST_FLAG(output) = output_; + GTEST_FLAG(print_time) = print_time_; + GTEST_FLAG(random_seed) = random_seed_; + GTEST_FLAG(repeat) = repeat_; + GTEST_FLAG(shuffle) = shuffle_; + GTEST_FLAG(stack_trace_depth) = stack_trace_depth_; + GTEST_FLAG(stream_result_to) = stream_result_to_; + GTEST_FLAG(throw_on_failure) = throw_on_failure_; + } + private: + // Fields for saving the original values of flags. + bool also_run_disabled_tests_; + bool break_on_failure_; + bool catch_exceptions_; + String color_; + String death_test_style_; + bool death_test_use_fork_; + String filter_; + String internal_run_death_test_; + bool list_tests_; + String output_; + bool print_time_; + bool pretty_; + internal::Int32 random_seed_; + internal::Int32 repeat_; + bool shuffle_; + internal::Int32 stack_trace_depth_; + String stream_result_to_; + bool throw_on_failure_; +} GTEST_ATTRIBUTE_UNUSED_; + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type UInt32 because wchar_t may not be +// wide enough to contain a code point. +// The output buffer str must containt at least 32 characters. +// The function returns the address of the output buffer. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. +GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str); + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. 
If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. +GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars); + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded(); + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (e.g., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. +GTEST_API_ bool ShouldShard(const char* total_shards_str, + const char* shard_index_str, + bool in_subprocess_for_death_test); + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error and +// and aborts. +GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val); + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +GTEST_API_ bool ShouldRunTestOnShard( + int total_shards, int shard_index, int test_id); + +// STL container utilities. + +// Returns the number of elements in the given container that satisfy +// the given predicate. +template +inline int CountIf(const Container& c, Predicate predicate) { + // Implemented as an explicit loop since std::count_if() in libCstd on + // Solaris has a non-standard signature. + int count = 0; + for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) + ++count; + } + return count; +} + +// Applies a function/functor to each element in the container. +template +void ForEach(const Container& c, Functor functor) { + std::for_each(c.begin(), c.end(), functor); +} + +// Returns the i-th element of the vector, or default_value if i is not +// in range [0, v.size()). +template +inline E GetElementOr(const std::vector& v, int i, E default_value) { + return (i < 0 || i >= static_cast(v.size())) ? default_value : v[i]; +} + +// Performs an in-place shuffle of a range of the vector's elements. +// 'begin' and 'end' are element indices as an STL-style range; +// i.e. [begin, end) are shuffled, where 'end' == size() means to +// shuffle to the end of the vector. 
+template +void ShuffleRange(internal::Random* random, int begin, int end, + std::vector* v) { + const int size = static_cast(v->size()); + GTEST_CHECK_(0 <= begin && begin <= size) + << "Invalid shuffle range start " << begin << ": must be in range [0, " + << size << "]."; + GTEST_CHECK_(begin <= end && end <= size) + << "Invalid shuffle range finish " << end << ": must be in range [" + << begin << ", " << size << "]."; + + // Fisher-Yates shuffle, from + // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle + for (int range_width = end - begin; range_width >= 2; range_width--) { + const int last_in_range = begin + range_width - 1; + const int selected = begin + random->Generate(range_width); + std::swap((*v)[selected], (*v)[last_in_range]); + } +} + +// Performs an in-place shuffle of the vector's elements. +template +inline void Shuffle(internal::Random* random, std::vector* v) { + ShuffleRange(random, 0, static_cast(v->size()), v); +} + +// A function for deleting an object. Handy for being used as a +// functor. +template +static void Delete(T* x) { + delete x; +} + +// A predicate that checks the key of a TestProperty against a known key. +// +// TestPropertyKeyIs is copyable. +class TestPropertyKeyIs { + public: + // Constructor. + // + // TestPropertyKeyIs has NO default constructor. + explicit TestPropertyKeyIs(const char* key) + : key_(key) {} + + // Returns true iff the test name of test property matches on key_. + bool operator()(const TestProperty& test_property) const { + return String(test_property.key()).Compare(key_) == 0; + } + + private: + String key_; +}; + +// Class UnitTestOptions. +// +// This class contains functions for processing options the user +// specifies when running the tests. It has only static members. +// +// In most cases, the user can specify an option using either an +// environment variable or a command line flag. E.g. you can set the +// test filter using either GTEST_FILTER or --gtest_filter. If both +// the variable and the flag are present, the latter overrides the +// former. +class GTEST_API_ UnitTestOptions { + public: + // Functions for processing the gtest_output flag. + + // Returns the output format, or "" for normal printed output. + static String GetOutputFormat(); + + // Returns the absolute path of the requested output file, or the + // default (test_detail.xml in the original working directory) if + // none was explicitly specified. + static String GetAbsolutePathToOutputFile(); + + // Functions for processing the gtest_filter flag. + + // Returns true iff the wildcard pattern matches the string. The + // first ':' or '\0' character in pattern marks the end of it. + // + // This recursive algorithm isn't very efficient, but is clear and + // works well enough for matching test names, which are short. + static bool PatternMatchesString(const char *pattern, const char *str); + + // Returns true iff the user-specified filter matches the test case + // name and the test name. + static bool FilterMatchesTest(const String &test_case_name, + const String &test_name); + +#if GTEST_OS_WINDOWS + // Function for supporting the gtest_catch_exception flag. + + // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the + // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. + // This function is useful as an __except condition. + static int GTestShouldProcessSEH(DWORD exception_code); +#endif // GTEST_OS_WINDOWS + + // Returns true if "name" matches the ':' separated list of glob-style + // filters in "filter". 
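+  //
+  // Illustrative example (editorial note, not upstream text): with
+  // filter = "FooTest.*:BarTest.One", the names "FooTest.Works" and
+  // "BarTest.One" match, while "BazTest.Two" does not.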
+ static bool MatchesFilter(const String& name, const char* filter); +}; + +// Returns the current application's name, removing directory path if that +// is present. Used by UnitTestOptions::GetOutputFile. +GTEST_API_ FilePath GetCurrentExecutableName(); + +// The role interface for getting the OS stack trace as a string. +class OsStackTraceGetterInterface { + public: + OsStackTraceGetterInterface() {} + virtual ~OsStackTraceGetterInterface() {} + + // Returns the current OS stack trace as a String. Parameters: + // + // max_depth - the maximum number of stack frames to be included + // in the trace. + // skip_count - the number of top frames to be skipped; doesn't count + // against max_depth. + virtual String CurrentStackTrace(int max_depth, int skip_count) = 0; + + // UponLeavingGTest() should be called immediately before Google Test calls + // user code. It saves some information about the current stack that + // CurrentStackTrace() will use to find and hide Google Test stack frames. + virtual void UponLeavingGTest() = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); +}; + +// A working implementation of the OsStackTraceGetterInterface interface. +class OsStackTraceGetter : public OsStackTraceGetterInterface { + public: + OsStackTraceGetter() : caller_frame_(NULL) {} + virtual String CurrentStackTrace(int max_depth, int skip_count); + virtual void UponLeavingGTest(); + + // This string is inserted in place of stack frames that are part of + // Google Test's implementation. + static const char* const kElidedFramesMarker; + + private: + Mutex mutex_; // protects all internal state + + // We save the stack frame below the frame that calls user code. + // We do this because the address of the frame immediately below + // the user code changes between the call to UponLeavingGTest() + // and any calls to CurrentStackTrace() from within the user code. + void* caller_frame_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); +}; + +// Information about a Google Test trace point. +struct TraceInfo { + const char* file; + int line; + String message; +}; + +// This is the default global test part result reporter used in UnitTestImpl. +// This class should only be used by UnitTestImpl. +class DefaultGlobalTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. Reports the test part + // result in the current test. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); +}; + +// This is the default per thread test part result reporter used in +// UnitTestImpl. This class should only be used by UnitTestImpl. +class DefaultPerThreadTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. The implementation just + // delegates to the current global test part result reporter of *unit_test_. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); +}; + +// The private implementation of the UnitTest class. 
We don't protect +// the methods under a mutex, as this class is not accessible by a +// user and the UnitTest class that delegates work to this class does +// proper locking. +class GTEST_API_ UnitTestImpl { + public: + explicit UnitTestImpl(UnitTest* parent); + virtual ~UnitTestImpl(); + + // There are two different ways to register your own TestPartResultReporter. + // You can register your own repoter to listen either only for test results + // from the current thread or for results from all threads. + // By default, each per-thread test result repoter just passes a new + // TestPartResult to the global test result reporter, which registers the + // test part result for the currently running test. + + // Returns the global test part result reporter. + TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); + + // Sets the global test part result reporter. + void SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter); + + // Returns the test part result reporter for the current thread. + TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); + + // Sets the test part result reporter for the current thread. + void SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter); + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const { return !Failed(); } + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const { + return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed(); + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[i]; + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i) { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[index]; + } + + // Provides access to the event listener list. + TestEventListeners* listeners() { return &listeners_; } + + // Returns the TestResult for the test that's currently running, or + // the TestResult for the ad hoc test if no test is running. + TestResult* current_test_result(); + + // Returns the TestResult for the ad hoc test. 
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } + + // Sets the OS stack trace getter. + // + // Does nothing if the input and the current OS stack trace getter + // are the same; otherwise, deletes the old getter and makes the + // input the current getter. + void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); + + // Returns the current OS stack trace getter if it is not NULL; + // otherwise, creates an OsStackTraceGetter, makes it the current + // getter, and returns it. + OsStackTraceGetterInterface* os_stack_trace_getter(); + + // Returns the current OS stack trace as a String. + // + // The maximum number of stack frames to be included is specified by + // the gtest_stack_trace_depth flag. The skip_count parameter + // specifies the number of top frames to be skipped, which doesn't + // count against the number of frames to be included. + // + // For example, if Foo() calls Bar(), which in turn calls + // CurrentOsStackTraceExceptTop(1), Foo() will be included in the + // trace but Bar() and CurrentOsStackTraceExceptTop() won't. + String CurrentOsStackTraceExceptTop(int skip_count); + + // Finds and returns a TestCase with the given name. If one doesn't + // exist, creates one and returns it. + // + // Arguments: + // + // test_case_name: name of the test case + // type_param: the name of the test's type parameter, or NULL if + // this is not a typed or a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase* GetTestCase(const char* test_case_name, + const char* type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Adds a TestInfo to the unit test. + // + // Arguments: + // + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + // test_info: the TestInfo object + void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + TestInfo* test_info) { + // In order to support thread-safe death tests, we need to + // remember the original working directory when the test program + // was first invoked. We cannot do this in RUN_ALL_TESTS(), as + // the user may have changed the current directory before calling + // RUN_ALL_TESTS(). Therefore we capture the current directory in + // AddTestInfo(), which is called to register a TEST or TEST_F + // before main() is reached. + if (original_working_dir_.IsEmpty()) { + original_working_dir_.Set(FilePath::GetCurrentDir()); + GTEST_CHECK_(!original_working_dir_.IsEmpty()) + << "Failed to get the current working directory."; + } + + GetTestCase(test_info->test_case_name(), + test_info->type_param(), + set_up_tc, + tear_down_tc)->AddTestInfo(test_info); + } + +#if GTEST_HAS_PARAM_TEST + // Returns ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() { + return parameterized_test_registry_; + } +#endif // GTEST_HAS_PARAM_TEST + + // Sets the TestCase object for the test that's currently running. + void set_current_test_case(TestCase* a_current_test_case) { + current_test_case_ = a_current_test_case; + } + + // Sets the TestInfo object for the test that's currently running. If + // current_test_info is NULL, the assertion results will be stored in + // ad_hoc_test_result_. 
+ void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter + // combination. This method can be called more then once; it has guards + // protecting from registering the tests more then once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_cases_, TestCase::ClearTestCaseResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestCase and TestInfo object. + // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. + void ListTestsMatchingFilter(); + + const TestCase* current_test_case() const { return current_test_case_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest. + void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. 
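+  //
+  // Editorial sketch (not upstream text): streaming is controlled by the
+  // --gtest_stream_result_to flag, whose value is a "host:port" pair, e.g.
+  //
+  //   ./test_binary --gtest_stream_result_to=localhost:9091
+  //
+  // where the port 9091 is only an example value.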
+ void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test cases, and the tests within each test case, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test cases and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. + bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. + internal::FilePath original_working_dir_; + + // The default test part result reporters. + DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; + DefaultPerThreadTestPartResultReporter + default_per_thread_test_part_result_reporter_; + + // Points to (but doesn't own) the global test part result reporter. + TestPartResultReporterInterface* global_test_part_result_repoter_; + + // Protects read and write access to global_test_part_result_reporter_. + internal::Mutex global_test_part_result_reporter_mutex_; + + // Points to (but doesn't own) the per-thread test part result reporter. + internal::ThreadLocal + per_thread_test_part_result_reporter_; + + // The vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector environments_; + + // The vector of TestCases in their original order. It owns the + // elements in the vector. + std::vector test_cases_; + + // Provides a level of indirection for the test case list to allow + // easy shuffling and restoring the test case order. The i-th + // element of this vector is the index of the i-th test case in the + // shuffled order. + std::vector test_case_indices_; + +#if GTEST_HAS_PARAM_TEST + // ParameterizedTestRegistry object used to register value-parameterized + // tests. + internal::ParameterizedTestCaseRegistry parameterized_test_registry_; + + // Indicates whether RegisterParameterizedTests() has been called already. + bool parameterized_tests_registered_; +#endif // GTEST_HAS_PARAM_TEST + + // Index of the last death test case registered. Initially -1. + int last_death_test_case_; + + // This points to the TestCase for the currently running test. It + // changes as Google Test goes through one test case after another. + // When no test is running, this is set to NULL and Google Test + // stores assertion results in ad_hoc_test_result_. Initially NULL. + TestCase* current_test_case_; + + // This points to the TestInfo for the currently running test. It + // changes as Google Test goes through one test after another. 
When + // no test is running, this is set to NULL and Google Test stores + // assertion results in ad_hoc_test_result_. Initially NULL. + TestInfo* current_test_info_; + + // Normally, a user only writes assertions inside a TEST or TEST_F, + // or inside a function called by a TEST or TEST_F. Since Google + // Test keeps track of which test is current running, it can + // associate such an assertion with the test it belongs to. + // + // If an assertion is encountered when no TEST or TEST_F is running, + // Google Test attributes the assertion result to an imaginary "ad hoc" + // test, and records the result in ad_hoc_test_result_. + TestResult ad_hoc_test_result_; + + // The list of event listeners that can be used to track events inside + // Google Test. + TestEventListeners listeners_; + + // The OS stack trace getter. Will be deleted when the UnitTest + // object is destructed. By default, an OsStackTraceGetter is used, + // but the user can set this field to use a custom getter if that is + // desired. + OsStackTraceGetterInterface* os_stack_trace_getter_; + + // True iff PostFlagParsingInit() has been called. + bool post_flag_parse_init_performed_; + + // The random number seed used at the beginning of the test run. + int random_seed_; + + // Our random number generator. + internal::Random random_; + + // How long the test took to run, in milliseconds. + TimeInMillis elapsed_time_; + +#if GTEST_HAS_DEATH_TEST + // The decomposed components of the gtest_internal_run_death_test flag, + // parsed when RUN_ALL_TESTS is called. + internal::scoped_ptr internal_run_death_test_flag_; + internal::scoped_ptr death_test_factory_; +#endif // GTEST_HAS_DEATH_TEST + + // A per-thread stack of traces created by the SCOPED_TRACE() macro. + internal::ThreadLocal > gtest_trace_stack_; + + // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() + // starts. + bool catch_exceptions_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); +}; // class UnitTestImpl + +// Convenience function for accessing the global UnitTest +// implementation object. +inline UnitTestImpl* GetUnitTestImpl() { + return UnitTest::GetInstance()->impl(); +} + +#if GTEST_USES_SIMPLE_RE + +// Internal helper functions for implementing the simple regular +// expression matcher. +GTEST_API_ bool IsInSet(char ch, const char* str); +GTEST_API_ bool IsAsciiDigit(char ch); +GTEST_API_ bool IsAsciiPunct(char ch); +GTEST_API_ bool IsRepeat(char ch); +GTEST_API_ bool IsAsciiWhiteSpace(char ch); +GTEST_API_ bool IsAsciiWordChar(char ch); +GTEST_API_ bool IsValidEscape(char ch); +GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); +GTEST_API_ bool ValidateRegex(const char* regex); +GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead( + bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); + +#endif // GTEST_USES_SIMPLE_RE + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv); +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); + +#if GTEST_HAS_DEATH_TEST + +// Returns the message describing the last system error, regardless of the +// platform. +GTEST_API_ String GetLastErrnoDescription(); + +# if GTEST_OS_WINDOWS +// Provides leak-safe Windows kernel handle ownership. 
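+// Editorial sketch (not part of the upstream sources), assuming a Windows
+// build: the wrapped handle is closed automatically when the AutoHandle
+// leaves scope, e.g.
+//
+//   AutoHandle event(::CreateEvent(NULL, FALSE, FALSE, NULL));
+//   ::SetEvent(event.Get());
+//   // ::CloseHandle is invoked by ~AutoHandle().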
+class AutoHandle { + public: + AutoHandle() : handle_(INVALID_HANDLE_VALUE) {} + explicit AutoHandle(HANDLE handle) : handle_(handle) {} + + ~AutoHandle() { Reset(); } + + HANDLE Get() const { return handle_; } + void Reset() { Reset(INVALID_HANDLE_VALUE); } + void Reset(HANDLE handle) { + if (handle != handle_) { + if (handle_ != INVALID_HANDLE_VALUE) + ::CloseHandle(handle_); + handle_ = handle; + } + } + + private: + HANDLE handle_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); +}; +# endif // GTEST_OS_WINDOWS + +// Attempts to parse a string into a positive integer pointed to by the +// number parameter. Returns true if that is possible. +// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use +// it here. +template +bool ParseNaturalNumber(const ::std::string& str, Integer* number) { + // Fail fast if the given string does not begin with a digit; + // this bypasses strtoXXX's "optional leading whitespace and plus + // or minus sign" semantics, which are undesirable here. + if (str.empty() || !IsDigit(str[0])) { + return false; + } + errno = 0; + + char* end; + // BiggestConvertible is the largest integer type that system-provided + // string-to-number conversion routines can return. + +# if GTEST_OS_WINDOWS && !defined(__GNUC__) + + // MSVC and C++ Builder define __int64 instead of the standard long long. + typedef unsigned __int64 BiggestConvertible; + const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10); + +# else + + typedef unsigned long long BiggestConvertible; // NOLINT + const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); + +# endif // GTEST_OS_WINDOWS && !defined(__GNUC__) + + const bool parse_success = *end == '\0' && errno == 0; + + // TODO(vladl@google.com): Convert this to compile time assertion when it is + // available. + GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed)); + + const Integer result = static_cast(parsed); + if (parse_success && static_cast(result) == parsed) { + *number = result; + return true; + } + return false; +} +#endif // GTEST_HAS_DEATH_TEST + +// TestResult contains some private methods that should be hidden from +// Google Test user but are required for testing. This class allow our tests +// to access them. +// +// This class is supplied only for the purpose of testing Google Test's own +// constructs. Do not use it in user tests, either directly or indirectly. +class TestResultAccessor { + public: + static void RecordProperty(TestResult* test_result, + const TestProperty& property) { + test_result->RecordProperty(property); + } + + static void ClearTestPartResults(TestResult* test_result) { + test_result->ClearTestPartResults(); + } + + static const std::vector& test_part_results( + const TestResult& test_result) { + return test_result.test_part_results(); + } +}; + +} // namespace internal +} // namespace testing + +#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_ +#undef GTEST_IMPLEMENTATION_ + +#if GTEST_OS_WINDOWS +# define vsnprintf _vsnprintf +#endif // GTEST_OS_WINDOWS + +namespace testing { + +using internal::CountIf; +using internal::ForEach; +using internal::GetElementOr; +using internal::Shuffle; + +// Constants. + +// A test whose test case name or test name matches this filter is +// disabled and not run. +static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*"; + +// A test case whose name matches this filter is considered a death +// test case and will be run before test cases whose name doesn't +// match this filter. 
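+// For example (editorial note, not upstream text), "FooDeathTest" and
+// "MyDeathTest/0" match the filter below, while a plain "FooTest" does not.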
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*"; + +// A test filter that matches everything. +static const char kUniversalFilter[] = "*"; + +// The default output file for XML output. +static const char kDefaultOutputFile[] = "test_detail.xml"; + +// The environment variable name for the test shard index. +static const char kTestShardIndex[] = "GTEST_SHARD_INDEX"; +// The environment variable name for the total number of test shards. +static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS"; +// The environment variable name for the test shard status file. +static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE"; + +namespace internal { + +// The text used in failure messages to indicate the start of the +// stack trace. +const char kStackTraceMarker[] = "\nStack trace:\n"; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +bool g_help_flag = false; + +} // namespace internal + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, + internal::BoolFromGTestEnv("break_on_failure", false), + "True iff a failed assertion should be a debugger break-point."); + +GTEST_DEFINE_bool_( + catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True iff " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to xterm, xterm-color, xterm-256color, linux or cygwin."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", kUniversalFilter), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude). A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", ""), + "A format (currently must be \"xml\"), optionally followed " + "by a colon and an output file name or directory. A directory " + "is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". " + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_( + print_time, + internal::BoolFromGTestEnv("print_time", true), + "True iff " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. 
Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_( + show_internal_stack_frames, false, + "True iff " GTEST_NAME_ " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_( + shuffle, + internal::BoolFromGTestEnv("shuffle", false), + "True iff " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. Example: \"localhost:555\". The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise."); + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +UInt32 Random::Generate(UInt32 range) { + // These constants are the same as are used in glibc's rand(3). + state_ = (1103515245U*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true iff the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). +// +// A user must call testing::InitGoogleTest() to initialize Google +// Test. g_init_gtest_count is set to the number of times +// InitGoogleTest() has been called. We don't protect this variable +// under a mutex as it is only accessed in the main thread. +int g_init_gtest_count = 0; +static bool GTestIsInitialized() { return g_init_gtest_count != 0; } + +// Iterates over a vector of TestCases, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. +static int SumOverTestCaseList(const std::vector& case_list, + int (TestCase::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true iff the test case passed. +static bool TestCasePassed(const TestCase* test_case) { + return test_case->should_run() && test_case->Passed(); +} + +// Returns true iff the test case failed. +static bool TestCaseFailed(const TestCase* test_case) { + return test_case->should_run() && test_case->Failed(); +} + +// Returns true iff test_case contains at least one test that should +// run. +static bool ShouldRunTestCase(const TestCase* test_case) { + return test_case->should_run(); +} + +// AssertHelper constructor. 
+AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +// Mutex for linked pointers. +GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// Application pathname gotten in InitGoogleTest. +String g_executable_path; + +// Returns the current application's name, removing directory path if that +// is present. +FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS + result.Set(FilePath(g_executable_path).RemoveExtension("exe")); +#else + result.Set(FilePath(g_executable_path)); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. + +// Returns the output format, or "" for normal printed output. +String UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) return String(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == NULL) ? + String(gtest_output_flag) : + String(gtest_output_flag, colon - gtest_output_flag); +} + +// Returns the name of the requested output file, or the default if none +// was explicitly specified. +String UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) + return String(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == NULL) + return String(internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).ToString() ); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + // TODO(wan@google.com): on Windows \some\path is not an absolute + // path (as its meaning depends on the current drive), yet the + // following logic for turning it into an absolute path is wrong. + // Fix it. + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.ToString(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.ToString(); +} + +// Returns true iff the wildcard pattern matches the string. The +// first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. +bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. 
+ return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == NULL) { + return false; + } + + // Skips the pattern separater (the ':' character). + cur_pattern++; + } +} + +// TODO(keithray): move String function implementations to gtest-string.cc. + +// Returns true iff the user-specified filter matches the test case +// name and the test name. +bool UnitTestOptions::FilterMatchesTest(const String &test_case_name, + const String &test_name) { + const String& full_name = String::Format("%s.%s", + test_case_name.c_str(), + test_name.c_str()); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + String positive; + String negative; + if (dash == NULL) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = String(""); + } else { + positive = String(p, dash - p); // Everything up to the dash + negative = String(dash+1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test. + return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. 
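+// Illustrative usage sketch (editorial note, not part of the upstream
+// sources):
+//
+//   TestPartResultArray results;
+//   {
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &results);
+//     ADD_FAILURE() << "this failure is captured, not reported";
+//   }
+//   // 'results' now holds the intercepted TestPartResult.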
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. +ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. +void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const string& substr) { + const String expected(type == TestPartResult::kFatalFailure ? + "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == NULL) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. 
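// Editorial illustration (not part of the upstream Google Test source):
// ScopedFakeTestPartResultReporter and HasOneFailure() are essentially the
// machinery behind the EXPECT_FATAL_FAILURE()/EXPECT_NONFATAL_FAILURE()
// macros declared in gtest-spi.h, which is how user code normally reaches
// them. A minimal sketch of that usage:
#if 0  // Illustrative only; this belongs in a user test file, not gtest.cc.
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"

TEST(FailureInterceptionDemo, CatchesExactlyOneNonfatalFailure) {
  // The statement runs with a fake reporter installed; the macro then checks
  // (via HasOneFailure) that exactly one non-fatal failure whose message
  // contains the given substring was recorded.
  EXPECT_NONFATAL_FAILURE({ EXPECT_EQ(2, 1 + 2); }, "1 + 2");
}
#endif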
+SingleFailureChecker:: SingleFailureChecker( + const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr) + : results_(results), + type_(type), + substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} + +// Sets the global test part result reporter. +void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test cases. +int UnitTestImpl::successful_test_case_count() const { + return CountIf(test_cases_, TestCasePassed); +} + +// Gets the number of failed test cases. +int UnitTestImpl::failed_test_case_count() const { + return CountIf(test_cases_, TestCaseFailed); +} + +// Gets the number of all test cases. +int UnitTestImpl::total_test_case_count() const { + return static_cast(test_cases_.size()); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTestImpl::test_case_to_run_count() const { + return CountIf(test_cases_, ShouldRunTestCase); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); +} + +// Gets the number of all tests. 
+int UnitTestImpl::total_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); +} + +// Gets the number of tests that should run. +int UnitTestImpl::test_to_run_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); +} + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + (void)skip_count; + return String(""); +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__) + // Difference between 1970-01-01 and 1601-01-01 in milliseconds. + // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + // TODO(kenton@google.com): Shouldn't this just use + // GetSystemTimeAsFileTime()? + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + +# ifdef _MSC_VER + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. + // TODO(kenton@google.com): Use GetTickCount()? Or use + // SystemTimeToFileTime() +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. + _ftime64(&now); +# pragma warning(pop) // Restores the warning state. +# else + + _ftime64(&now); + +# endif // _MSC_VER + + return static_cast(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, NULL); + return static_cast(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String + +// Returns the input enclosed in double quotes if it's not NULL; +// otherwise returns "(null)". For example, "\"Hello\"" is returned +// for input "Hello". +// +// This is useful for printing a C string in the syntax of a literal. +// +// Known issue: escape sequences are not handled yet. +String String::ShowCStringQuoted(const char* c_str) { + return c_str ? String::Format("\"%s\"", c_str) : String("(null)"); +} + +// Copies at most length characters from str into a newly-allocated +// piece of memory of size length+1. The memory is allocated with new[]. +// A terminating null byte is written to the memory, and a pointer to it +// is returned. If str is NULL, NULL is returned. 
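// Editorial note (worked example, not part of the upstream source): the
// Windows branch of GetTimeInMillis() above converts a FILETIME, which counts
// 100-nanosecond intervals since 1601-01-01, into milliseconds since the Unix
// epoch (1970-01-01):
//
//   ticks / kTenthMicrosInMilliSecond (10,000)    ->  ms since 1601-01-01
//   kJavaEpochToWinFileTimeDelta = 116444736 * 100000 ms
//                                = 11,644,473,600,000 ms
//                                = 11,644,473,600 s (the 369 years between
//                                  1601-01-01 and 1970-01-01)
//   ms since 1601 - kJavaEpochToWinFileTimeDelta  ->  ms since 1970-01-01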
+static char* CloneString(const char* str, size_t length) { + if (str == NULL) { + return NULL; + } else { + char* const clone = new char[length + 1]; + posix::StrNCpy(clone, str, length); + clone[length] = '\0'; + return clone; + } +} + +// Clones a 0-terminated C string, allocating memory using new. The +// caller is responsible for deleting[] the return value. Returns the +// cloned string, or NULL if the input is NULL. +const char * String::CloneCString(const char* c_str) { + return (c_str == NULL) ? + NULL : CloneString(c_str, strlen(c_str)); +} + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return NULL; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, + NULL, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return NULL; + const int ansi_length = + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + NULL, 0, NULL, NULL); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + ansi, ansi_length, NULL, NULL); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true iff they have the same content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CStringEquals(const char * lhs, const char * rhs) { + if ( lhs == NULL ) return rhs == NULL; + + if ( rhs == NULL ) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object. +static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + // TODO(wan): consider allowing a testing::String object to + // contain '\0'. This will make it behave more like std::string, + // and will allow ToUtf8String() to return the correct encoding + // for '\0' s.t. we can get rid of the conditional here (and in + // several other places). + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +} // namespace internal + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. 
+Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != NULL ? + new ::std::string(*other.message_) : + static_cast< ::std::string*>(NULL)) { +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != NULL) + negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. +AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const String& expected_value, + const String& actual_value, + bool ignoring_case) { + Message msg; + msg << "Value of: " << actual_expression; + if (actual_value != actual_expression) { + msg << "\n Actual: " << actual_value; + } + + msg << "\nExpected: " << expected_expression; + if (ignoring_case) { + msg << " (ignoring case)"; + } + if (expected_value != expected_expression) { + msg << "\nWhich is: " << expected_value; + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. 
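// Editorial note (worked example, not part of the upstream source): for the
// ASSERT_EQ(foo, bar) case described above (foo == 5, bar == 6), EqFailure()
// renders the familiar message
//
//   Value of: bar
//     Actual: 6
//   Expected: foo
//   Which is: 5
//
// The "Actual:" and "Which is:" lines are dropped when a value prints the
// same as its expression (e.g. ASSERT_EQ(5, 6)), and " (ignoring case)" is
// appended to the "Expected:" line for the *_STRCASEEQ assertions.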
+AssertionResult DoubleNearPredFormat(const char* expr1,
+                                     const char* expr2,
+                                     const char* abs_error_expr,
+                                     double val1,
+                                     double val2,
+                                     double abs_error) {
+  const double diff = fabs(val1 - val2);
+  if (diff <= abs_error) return AssertionSuccess();
+
+  // TODO(wan): do not print the value of an expression if it's
+  // already a literal.
+  return AssertionFailure()
+      << "The difference between " << expr1 << " and " << expr2
+      << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+      << expr1 << " evaluates to " << val1 << ",\n"
+      << expr2 << " evaluates to " << val2 << ", and\n"
+      << abs_error_expr << " evaluates to " << abs_error << ".";
+}
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+                                const char* expr2,
+                                RawType val1,
+                                RawType val2) {
+  // Returns success if val1 is less than val2,
+  if (val1 < val2) {
+    return AssertionSuccess();
+  }
+
+  // or if val1 is almost equal to val2.
+  const FloatingPoint<RawType> lhs(val1), rhs(val2);
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  // Note that the above two checks will both fail if either val1 or
+  // val2 is NaN, as the IEEE floating-point standard requires that
+  // any predicate involving a NaN must return false.
+
+  ::std::stringstream val1_ss;
+  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val1;
+
+  ::std::stringstream val2_ss;
+  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val2;
+
+  return AssertionFailure()
+      << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+      << "  Actual: " << StringStreamToString(&val1_ss) << " vs "
+      << StringStreamToString(&val2_ss);
+}
+
+}  // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+                        float val1, float val2) {
+  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                         double val1, double val2) {
+  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* expected_expression,
+                            const char* actual_expression,
+                            BiggestInt expected,
+                            BiggestInt actual) {
+  if (expected == actual) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   FormatForComparisonFailureMessage(expected, actual),
+                   FormatForComparisonFailureMessage(actual, expected),
+                   false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments.  It is here
+// just to avoid copy-and-paste of similar code.
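// Editorial illustration (not part of the upstream source): FloatLE() and
// DoubleLE() above are meant to be used through the predicate-format
// assertions, and DoubleNearPredFormat() is the helper behind ASSERT_NEAR.
// A minimal sketch:
#if 0  // Illustrative only; this belongs in a user test file, not gtest.cc.
TEST(FloatingPointDemo, LessOrAlmostEqual) {
  const float a = 1.0f;
  const float b = a + 1e-8f;  // Rounds to a, so well within a few ULPs.

  // Passes: b is "almost equal" to a in the ULP-based sense used above.
  EXPECT_PRED_FORMAT2(::testing::FloatLE, b, a);

  // Passes: 1.0 < 2.0, the strictly-less-than branch.
  EXPECT_PRED_FORMAT2(::testing::DoubleLE, 1.0, 2.0);

  // Passes: |3.14159 - 3.1416| = 0.00001 <= 0.0001.
  ASSERT_NEAR(3.14159, 3.1416, 1e-4);
}
#endif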
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + BiggestInt val1, BiggestInt val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +} + +// Implements the helper function for {ASSERT|EXPECT}_NE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(NE, !=) +// Implements the helper function for {ASSERT|EXPECT}_LE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LE, <=) +// Implements the helper function for {ASSERT|EXPECT}_LT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LT, < ) +// Implements the helper function for {ASSERT|EXPECT}_GE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GE, >=) +// Implements the helper function for {ASSERT|EXPECT}_GT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GT, > ) + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowCStringQuoted(expected), + String::ShowCStringQuoted(actual), + false); +} + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CaseInsensitiveCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowCStringQuoted(expected), + String::ShowCStringQuoted(actual), + true); +} + +// The helper function for {ASSERT|EXPECT}_STRNE. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CaseInsensitiveCStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" + << s2_expression << ") (ignoring case), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +} // namespace internal + +namespace { + +// Helper functions for implementing IsSubString() and IsNotSubstring(). + +// This group of overloaded functions return true iff needle is a +// substring of haystack. NULL is considered a substring of itself +// only. 
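// Editorial illustration (not part of the upstream source): the public
// IsSubstring()/IsNotSubstring() wrappers defined below are intended for use
// with the predicate-format assertions, for example:
#if 0  // Illustrative only; this belongs in a user test file, not gtest.cc.
TEST(SubstringDemo, NeedleInHaystack) {
  EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", "two needles");
  EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "thread", "two needles");
  // Per the note above, NULL is a substring of NULL only.
  EXPECT_PRED_FORMAT2(::testing::IsSubstring,
                      static_cast<const char*>(NULL),
                      static_cast<const char*>(NULL));
}
#endif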
+ +bool IsSubstringPred(const char* needle, const char* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return strstr(haystack, needle) != NULL; +} + +bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return wcsstr(haystack, needle) != NULL; +} + +// StringType here can be either ::std::string or ::std::wstring. +template +bool IsSubstringPred(const StringType& needle, + const StringType& haystack) { + return haystack.find(needle) != StringType::npos; +} + +// This function implements either IsSubstring() or IsNotSubstring(), +// depending on the value of the expected_to_be_substring parameter. +// StringType here can be const char*, const wchar_t*, ::std::string, +// or ::std::wstring. +template +AssertionResult IsSubstringImpl( + bool expected_to_be_substring, + const char* needle_expr, const char* haystack_expr, + const StringType& needle, const StringType& haystack) { + if (IsSubstringPred(needle, haystack) == expected_to_be_substring) + return AssertionSuccess(); + + const bool is_wide_string = sizeof(needle[0]) > 1; + const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? "" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, 
    needle_expr, haystack_expr, needle, haystack);
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for IsHRESULT{SuccessFailure} predicates
+AssertionResult HRESULTFailureHelper(const char* expr,
+                                     const char* expected,
+                                     long hr) {  // NOLINT
+# if GTEST_OS_WINDOWS_MOBILE
+
+  // Windows CE doesn't support FormatMessage.
+  const char error_text[] = "";
+
+# else
+
+  // Looks up the human-readable system message for the HRESULT code
+  // and since we're not passing any params to FormatMessage, we don't
+  // want inserts expanded.
+  const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+                       FORMAT_MESSAGE_IGNORE_INSERTS;
+  const DWORD kBufSize = 4096;  // String::Format can't exceed this length.
+  // Gets the system's human readable message string for this HRESULT.
+  char error_text[kBufSize] = { '\0' };
+  DWORD message_length = ::FormatMessageA(kFlags,
+                                          0,   // no source, we're asking system
+                                          hr,  // the error
+                                          0,   // no line width restrictions
+                                          error_text,  // output buffer
+                                          kBufSize,    // buf size
+                                          NULL);  // no arguments for inserts
+  // Trims trailing white space (FormatMessage leaves a trailing cr-lf)
+  for (; message_length && IsSpace(error_text[message_length - 1]);
+          --message_length) {
+    error_text[message_length - 1] = '\0';
+  }
+
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+  const String error_hex(String::Format("0x%08X ", hr));
+  return ::testing::AssertionFailure()
+      << "Expected: " << expr << " " << expected << ".\n"
+      << "  Actual: " << error_hex << error_text << "\n";
+}
+
+}  // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT
+  if (SUCCEEDED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT
+  if (FAILED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif  // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern.  Returns the n
+// lowest bits.  As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+  *bits >>= n;
+  return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
+char* CodePointToUtf8(UInt32 code_point, char* str) {
+  if (code_point <= kMaxCodePoint1) {
+    str[1] = '\0';
+    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx
+  } else if (code_point <= kMaxCodePoint2) {
+    str[2] = '\0';
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx
+  } else if (code_point <= kMaxCodePoint3) {
+    str[3] = '\0';
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx
+  } else if (code_point <= kMaxCodePoint4) {
+    str[4] = '\0';
+    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx
+  } else {
+    // The longest string String::Format can produce when invoked
+    // with these parameters is 28 characters long (not including
+    // the terminating nul character). We are asking for a 32 character
+    // buffer just in case. This is also enough for strncpy to
+    // null-terminate the destination string.
+    posix::StrNCpy(
+        str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32);
+    str[31] = '\0';  // Makes sure no change in the format to strncpy leaves
+                     // the result unterminated.
+  }
+  return str;
+}
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+  return sizeof(wchar_t) == 2 &&
+      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+                                                    wchar_t second) {
+  const UInt32 mask = (1 << 10) - 1;
+  return (sizeof(wchar_t) == 2) ?
+      (((first & mask) << 10) | (second & mask)) + 0x10000 :
+      // This function should not be called when the condition is
+      // false, but we provide a sensible default in case it is.
+      static_cast<UInt32>(first);
+}
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
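// Editorial note (worked examples, not part of the upstream source):
//
//   CodePointToUtf8(0xE9, buf)     // U+00E9, LATIN SMALL LETTER E WITH ACUTE
//     takes the two-byte branch: low 6 bits 0x29 -> 0x80 | 0x29 = 0xA9,
//     remaining bits 0x3 -> 0xC0 | 0x3 = 0xC3, giving "\xC3\xA9".
//
//   CodePointToUtf8(0x1D11E, buf)  // U+1D11E, MUSICAL SYMBOL G CLEF
//     takes the four-byte branch and yields "\xF0\x9D\x84\x9E".
//
//   With 16-bit wchar_t, U+1D11E is stored as the surrogate pair
//   0xD834 0xDD1E.  IsUtf16SurrogatePair(0xD834, 0xDD1E) is true, and
//   CreateCodePointFromUtf16SurrogatePair(0xD834, 0xDD1E)
//     = ((0x034 << 10) | 0x11E) + 0x10000 = 0xD11E + 0x10000 = 0x1D11E.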
+String WideStringToUtf8(const wchar_t* str, int num_chars) { + if (num_chars == -1) + num_chars = static_cast(wcslen(str)); + + ::std::stringstream stream; + for (int i = 0; i < num_chars; ++i) { + UInt32 unicode_code_point; + + if (str[i] == L'\0') { + break; + } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) { + unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i], + str[i + 1]); + i++; + } else { + unicode_code_point = static_cast(str[i]); + } + + char buffer[32]; // CodePointToUtf8 requires a buffer this big. + stream << CodePointToUtf8(unicode_code_point, buffer); + } + return StringStreamToString(&stream); +} + +// Converts a wide C string to a String using the UTF-8 encoding. +// NULL will be converted to "(null)". +String String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return String("(null)"); + + return String(internal::WideStringToUtf8(wide_c_str, -1).c_str()); +} + +// Similar to ShowWideCString(), except that this function encloses +// the converted string in double quotes. +String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) { + if (wide_c_str == NULL) return String("(null)"); + + return String::Format("L\"%s\"", + String::ShowWideCString(wide_c_str).c_str()); +} + +// Compares two wide C strings. Returns true iff they have the same +// content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual) { + if (String::WideCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + String::ShowWideCStringQuoted(expected), + String::ShowWideCStringQuoted(actual), + false); +} + +// Helper function for *_STRNE on wide strings. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << String::ShowWideCStringQuoted(s1) + << " vs " << String::ShowWideCStringQuoted(s2); +} + +// Compares two C strings, ignoring case. Returns true iff they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == NULL) + return rhs == NULL; + if (rhs == NULL) + return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. 
+ // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. + wint_t left, right; + do { + left = towlower(*lhs++); + right = towlower(*rhs++); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Compares this with another String. +// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0 +// if this is greater than rhs. +int String::Compare(const String & rhs) const { + const char* const lhs_c_str = c_str(); + const char* const rhs_c_str = rhs.c_str(); + + if (lhs_c_str == NULL) { + return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL + } else if (rhs_c_str == NULL) { + return 1; + } + + const size_t shorter_str_len = + length() <= rhs.length() ? length() : rhs.length(); + for (size_t i = 0; i != shorter_str_len; i++) { + if (lhs_c_str[i] < rhs_c_str[i]) { + return -1; + } else if (lhs_c_str[i] > rhs_c_str[i]) { + return 1; + } + } + return (length() < rhs.length()) ? -1 : + (length() > rhs.length()) ? 1 : 0; +} + +// Returns true iff this String ends with the given suffix. *Any* +// String is considered to end with a NULL or empty suffix. +bool String::EndsWith(const char* suffix) const { + if (suffix == NULL || CStringEquals(suffix, "")) return true; + + if (c_str() == NULL) return false; + + const size_t this_len = strlen(c_str()); + const size_t suffix_len = strlen(suffix); + return (this_len >= suffix_len) && + CStringEquals(c_str() + this_len - suffix_len, suffix); +} + +// Returns true iff this String ends with the given suffix, ignoring case. +// Any String is considered to end with a NULL or empty suffix. +bool String::EndsWithCaseInsensitive(const char* suffix) const { + if (suffix == NULL || CStringEquals(suffix, "")) return true; + + if (c_str() == NULL) return false; + + const size_t this_len = strlen(c_str()); + const size_t suffix_len = strlen(suffix); + return (this_len >= suffix_len) && + CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix); +} + +// Formats a list of arguments to a String, using the same format +// spec string as for printf. +// +// We do not use the StringPrintf class as it is not universally +// available. +// +// The result is limited to 4096 characters (including the tailing 0). +// If 4096 characters are not enough to format the input, or if +// there's an error, "" is +// returned. +String String::Format(const char * format, ...) { + va_list args; + va_start(args, format); + + char buffer[4096]; + const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]); + + // MSVC 8 deprecates vsnprintf(), so we want to suppress warning + // 4996 (deprecated function) there. +#ifdef _MSC_VER // We are using MSVC. +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. 
+ + const int size = vsnprintf(buffer, kBufferSize, format, args); + +# pragma warning(pop) // Restores the warning state. +#else // We are not using MSVC. + const int size = vsnprintf(buffer, kBufferSize, format, args); +#endif // _MSC_VER + va_end(args); + + // vsnprintf()'s behavior is not portable. When the buffer is not + // big enough, it returns a negative value in MSVC, and returns the + // needed buffer size on Linux. When there is an output error, it + // always returns a negative value. For simplicity, we lump the two + // error cases together. + if (size < 0 || size >= kBufferSize) { + return String(""); + } else { + return String(buffer, size); + } +} + +// Converts the buffer in a stringstream to a String, converting NUL +// bytes to "\\0" along the way. +String StringStreamToString(::std::stringstream* ss) { + const ::std::string& str = ss->str(); + const char* const start = str.c_str(); + const char* const end = start + str.length(); + + // We need to use a helper stringstream to do this transformation + // because String doesn't support push_back(). + ::std::stringstream helper; + for (const char* ch = start; ch != end; ++ch) { + if (*ch == '\0') { + helper << "\\0"; // Replaces NUL with "\\0"; + } else { + helper.put(*ch); + } + } + + return String(helper.str().c_str()); +} + +// Appends the user-supplied message to the Google-Test-generated message. +String AppendUserMessage(const String& gtest_msg, + const Message& user_msg) { + // Appends the user message if it's non-empty. + const String user_msg_string = user_msg.GetString(); + if (user_msg_string.empty()) { + return gtest_msg; + } + + Message msg; + msg << gtest_msg << "\n" << user_msg_string; + + return msg.GetString(); +} + +} // namespace internal + +// class TestResult + +// Creates an empty TestResult. +TestResult::TestResult() + : death_test_count_(0), + elapsed_time_(0) { +} + +// D'tor. +TestResult::~TestResult() { +} + +// Returns the i-th test part result among all the results. i can +// range from 0 to total_part_count() - 1. If i is not in that range, +// aborts the program. +const TestPartResult& TestResult::GetTestPartResult(int i) const { + if (i < 0 || i >= total_part_count()) + internal::posix::Abort(); + return test_part_results_.at(i); +} + +// Returns the i-th test property. i can range from 0 to +// test_property_count() - 1. If i is not in that range, aborts the +// program. +const TestProperty& TestResult::GetTestProperty(int i) const { + if (i < 0 || i >= test_property_count()) + internal::posix::Abort(); + return test_properties_.at(i); +} + +// Clears the test part results. +void TestResult::ClearTestPartResults() { + test_part_results_.clear(); +} + +// Adds a test part result to the list. +void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { + test_part_results_.push_back(test_part_result); +} + +// Adds a test property to the list. If a property with the same key as the +// supplied property is already represented, the value of this test_property +// replaces the old value for that key. 
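// Editorial note (worked example, not part of the upstream source):
// StringStreamToString() keeps embedded NUL bytes visible by expanding each
// one to the two characters '\' '0':
//
//   ::std::stringstream ss;
//   ss << "a" << '\0' << "b";
//   StringStreamToString(&ss)  ->  the four characters  a \ 0 b
//
// and AppendUserMessage() returns the Google-Test-generated message unchanged
// whenever the user-supplied message is empty.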
+void TestResult::RecordProperty(const TestProperty& test_property) { + if (!ValidateTestProperty(test_property)) { + return; + } + internal::MutexLock lock(&test_properites_mutex_); + const std::vector::iterator property_with_matching_key = + std::find_if(test_properties_.begin(), test_properties_.end(), + internal::TestPropertyKeyIs(test_property.key())); + if (property_with_matching_key == test_properties_.end()) { + test_properties_.push_back(test_property); + return; + } + property_with_matching_key->SetValue(test_property.value()); +} + +// Adds a failure if the key is a reserved attribute of Google Test +// testcase tags. Returns true if the property is valid. +bool TestResult::ValidateTestProperty(const TestProperty& test_property) { + internal::String key(test_property.key()); + if (key == "name" || key == "status" || key == "time" || key == "classname") { + ADD_FAILURE() + << "Reserved key used in RecordProperty(): " + << key + << " ('name', 'status', 'time', and 'classname' are reserved by " + << GTEST_NAME_ << ")"; + return false; + } + return true; +} + +// Clears the object. +void TestResult::Clear() { + test_part_results_.clear(); + test_properties_.clear(); + death_test_count_ = 0; + elapsed_time_ = 0; +} + +// Returns true iff the test failed. +bool TestResult::Failed() const { + for (int i = 0; i < total_part_count(); ++i) { + if (GetTestPartResult(i).failed()) + return true; + } + return false; +} + +// Returns true iff the test part fatally failed. +static bool TestPartFatallyFailed(const TestPartResult& result) { + return result.fatally_failed(); +} + +// Returns true iff the test fatally failed. +bool TestResult::HasFatalFailure() const { + return CountIf(test_part_results_, TestPartFatallyFailed) > 0; +} + +// Returns true iff the test part non-fatally failed. +static bool TestPartNonfatallyFailed(const TestPartResult& result) { + return result.nonfatally_failed(); +} + +// Returns true iff the test has a non-fatal failure. +bool TestResult::HasNonfatalFailure() const { + return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0; +} + +// Gets the number of all test parts. This is the sum of the number +// of successful test parts and the number of failed test parts. +int TestResult::total_part_count() const { + return static_cast(test_part_results_.size()); +} + +// Returns the number of the test properties. +int TestResult::test_property_count() const { + return static_cast(test_properties_.size()); +} + +// class Test + +// Creates a Test object. + +// The c'tor saves the values of all Google Test flags. +Test::Test() + : gtest_flag_saver_(new internal::GTestFlagSaver) { +} + +// The d'tor restores the values of all Google Test flags. +Test::~Test() { + delete gtest_flag_saver_; +} + +// Sets up the test fixture. +// +// A sub-class may override this. +void Test::SetUp() { +} + +// Tears down the test fixture. +// +// A sub-class may override this. +void Test::TearDown() { +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const char* key, const char* value) { + UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value); +} + +// Allows user supplied key value pairs to be recorded for later output. 
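// Editorial illustration (not part of the upstream source): RecordProperty()
// is the user-facing entry point for the machinery above; recorded keys and
// values show up as extra attributes of the test's testcase element when the
// XML report is enabled with --gtest_output=xml. A minimal sketch:
#if 0  // Illustrative only; this belongs in a user test file, not gtest.cc.
TEST(WidgetUsageTest, MinAndMaxWidgets) {
  RecordProperty("MaximumWidgets", 12);   // Uses the int overload below.
  RecordProperty("Author", "anonymous");  // Uses the const char* overload above.
  // "name", "status", "time" and "classname" would be rejected by
  // ValidateTestProperty() with an ADD_FAILURE().
}
#endif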
+void Test::RecordProperty(const char* key, int value) { + Message value_message; + value_message << value; + RecordProperty(key, value_message.GetString().c_str()); +} + +namespace internal { + +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const String& message) { + // This function is a friend of UnitTest and as such has access to + // AddTestPartResult. + UnitTest::GetInstance()->AddTestPartResult( + result_type, + NULL, // No info about the source file where the exception occurred. + -1, // We have no info on which line caused the exception. + message, + String()); // No stack trace, either. +} + +} // namespace internal + +// Google Test requires all tests in the same test case to use the same test +// fixture class. This function checks if the current test has the +// same fixture class as the first test in the current test case. If +// yes, it returns true; otherwise it generates a Google Test failure and +// returns false. +bool Test::HasSameFixtureClass() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + const TestCase* const test_case = impl->current_test_case(); + + // Info about the first test in the current test case. + const TestInfo* const first_test_info = test_case->test_info_list()[0]; + const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_; + const char* const first_test_name = first_test_info->name(); + + // Info about the current test. + const TestInfo* const this_test_info = impl->current_test_info(); + const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_; + const char* const this_test_name = this_test_info->name(); + + if (this_fixture_id != first_fixture_id) { + // Is the first test defined using TEST? + const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // The user mixed TEST and TEST_F in this test case - we'll tell + // him/her how to fix it. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test case is\n" + << "illegal. In test case " << this_test_info->test_case_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // The user defined two fixture classes with the same name in + // two namespaces - we'll tell him/her how to fix it. + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " + << this_test_info->test_case_name() << ",\n" + << "you defined test " << first_test_name + << " and test " << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. 
You should probably rename one\n" + << "of the classes to put the tests into different test cases."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static internal::String* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new internal::String(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. +static internal::String FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != NULL) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static internal::String PrintTestPartResultToString( + const TestPartResult& test_part_result); + +// A failed Google Test assertion will throw an exception of this type when +// GTEST_FLAG(throw_on_failure) is true (if exceptions are enabled). We +// derive it from std::runtime_error, which is for errors presumably +// detectable only at run time. Since std::runtime_error inherits from +// std::exception, many testing frameworks know how to extract and print the +// message inside it. +class GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} +}; +#endif // GTEST_HAS_EXCEPTIONS + +namespace internal { +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. (Microsoft compilers cannot handle SEH and C++ +// exceptions in the same function. Therefore, we provide a separate +// wrapper function for handling SEH exceptions.) +template +Result HandleSehExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { +#if GTEST_HAS_SEH + __try { + return (object->*method)(); + } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT + GetExceptionCode())) { + // We create the exception message on the heap because VC++ prohibits + // creation of objects with destructors on stack in functions using __try + // (see error C2712). + internal::String* exception_message = FormatSehExceptionMessage( + GetExceptionCode(), location); + internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, + *exception_message); + delete exception_message; + return static_cast(0); + } +#else + (void)location; + return (object->*method)(); +#endif // GTEST_HAS_SEH +} + +// Runs the given method and catches and reports C++ and/or SEH-style +// exceptions, if they are supported; returns the 0-value for type +// Result in case of an SEH exception. 
+template +Result HandleExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { + // NOTE: The user code can affect the way in which Google Test handles + // exceptions by setting GTEST_FLAG(catch_exceptions), but only before + // RUN_ALL_TESTS() starts. It is technically possible to check the flag + // after the exception is caught and either report or re-throw the + // exception based on the flag's value: + // + // try { + // // Perform the test method. + // } catch (...) { + // if (GTEST_FLAG(catch_exceptions)) + // // Report the exception as failure. + // else + // throw; // Re-throws the original exception. + // } + // + // However, the purpose of this flag is to allow the program to drop into + // the debugger when the exception is thrown. On most platforms, once the + // control enters the catch block, the exception origin information is + // lost and the debugger will stop the program at the point of the + // re-throw in this function -- instead of at the point of the original + // throw statement in the code under test. For this reason, we perform + // the check early, sacrificing the ability to affect Google Test's + // exception handling in the method where the exception is thrown. + if (internal::GetUnitTestImpl()->catch_exceptions()) { +#if GTEST_HAS_EXCEPTIONS + try { + return HandleSehExceptionsInMethodIfSupported(object, method, location); + } catch (const GoogleTestFailureException&) { // NOLINT + // This exception doesn't originate in code under test. It makes no + // sense to report it as a test failure. + throw; + } catch (const std::exception& e) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(e.what(), location)); + } catch (...) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(NULL, location)); + } + return static_cast(0); +#else + return HandleSehExceptionsInMethodIfSupported(object, method, location); +#endif // GTEST_HAS_EXCEPTIONS + } else { + return (object->*method)(); + } +} + +} // namespace internal + +// Runs the test and updates the test result. +void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful. + if (!HasFatalFailure()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true iff the current test has a fatal failure. +bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true iff the current test has a non-fatal failure. +bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. 
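// Editorial note (illustration, not part of the upstream source): when
// GTEST_FLAG(catch_exceptions) is enabled (the --gtest_catch_exceptions
// command-line flag), a test body that executes
//
//   throw std::runtime_error("boom");
//
// is reported by the code above as the fatal failure
//
//   C++ exception with description "boom" thrown in the test body.
//
// instead of terminating the test program.  Disabling the flag lets the
// exception propagate, so a debugger can stop at the original throw site.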
+// TODO(vladl@google.com): Make a_test_case_name and a_name const string&'s +// to signify they cannot be NULLs. +TestInfo::TestInfo(const char* a_test_case_name, + const char* a_name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_case_name_(a_test_case_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + value_param_(a_value_param ? new std::string(a_value_param) : NULL), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory) { + TestInfo* const test_info = + new TestInfo(test_case_name, name, type_param, value_param, + fixture_class_id, factory); + GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info); + return test_info; +} + +#if GTEST_HAS_PARAM_TEST +void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line) { + Message errors; + errors + << "Attempted redefinition of test case " << test_case_name << ".\n" + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " << test_case_name << ", you tried\n" + << "to define a test using a fixture class different from the one\n" + << "used earlier. This can happen if the two fixture classes are\n" + << "from different namespaces and have the same name. You should\n" + << "probably rename one of the classes to put the tests into different\n" + << "test cases."; + + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors.GetString().c_str()); +} +#endif // GTEST_HAS_PARAM_TEST + +} // namespace internal + +namespace { + +// A predicate that checks the test name of a TestInfo against a known +// value. +// +// This is used for implementation of the TestCase class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestNameIs is copyable. +class TestNameIs { + public: + // Constructor. + // + // TestNameIs has NO default constructor. + explicit TestNameIs(const char* name) + : name_(name) {} + + // Returns true iff the test name of test_info matches name_. 
+ bool operator()(const TestInfo * test_info) const { + return test_info && internal::String(test_info->name()).Compare(name_) == 0; + } + + private: + internal::String name_; +}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_CASE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { +#if GTEST_HAS_PARAM_TEST + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + parameterized_tests_registered_ = true; + } +#endif +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test only if the test object was created and its + // constructor didn't generate a fatal failure. + if ((test != NULL) && !Test::HasFatalFailure()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + // Deletes the test object. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + test, &Test::DeleteSelf_, "the test fixture's destructor"); + + result_.set_elapsed_time(internal::GetTimeInMillis() - start); + + // Notifies the unit test event listener that a test has just finished. + repeater->OnTestEnd(*this); + + // Tells UnitTest to stop associating assertion results to this + // test. + impl->set_current_test_info(NULL); +} + +// class TestCase + +// Gets the number of successful tests in this test case. +int TestCase::successful_test_count() const { + return CountIf(test_info_list_, TestPassed); +} + +// Gets the number of failed tests in this test case. +int TestCase::failed_test_count() const { + return CountIf(test_info_list_, TestFailed); +} + +int TestCase::disabled_test_count() const { + return CountIf(test_info_list_, TestDisabled); +} + +// Get the number of tests in this test case that should run. +int TestCase::test_to_run_count() const { + return CountIf(test_info_list_, ShouldRunTest); +} + +// Gets the number of all tests. +int TestCase::total_test_count() const { + return static_cast(test_info_list_.size()); +} + +// Creates a TestCase with the given name. +// +// Arguments: +// +// name: name of the test case +// a_type_param: the name of the test case's type parameter, or NULL if +// this is not a typed or a type-parameterized test case. +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +TestCase::TestCase(const char* a_name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc) + : name_(a_name), + type_param_(a_type_param ? 
new std::string(a_type_param) : NULL), + set_up_tc_(set_up_tc), + tear_down_tc_(tear_down_tc), + should_run_(false), + elapsed_time_(0) { +} + +// Destructor of TestCase. +TestCase::~TestCase() { + // Deletes every Test in the collection. + ForEach(test_info_list_, internal::Delete); +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +const TestInfo* TestCase::GetTestInfo(int i) const { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +TestInfo* TestCase::GetMutableTestInfo(int i) { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Adds a test to this test case. Will delete the test upon +// destruction of the TestCase object. +void TestCase::AddTestInfo(TestInfo * test_info) { + test_info_list_.push_back(test_info); + test_indices_.push_back(static_cast(test_indices_.size())); +} + +// Runs every test in this TestCase. +void TestCase::Run() { + if (!should_run_) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_case(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + repeater->OnTestCaseStart(*this); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunSetUpTestCase, "SetUpTestCase()"); + + const internal::TimeInMillis start = internal::GetTimeInMillis(); + for (int i = 0; i < total_test_count(); i++) { + GetMutableTestInfo(i)->Run(); + } + elapsed_time_ = internal::GetTimeInMillis() - start; + + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunTearDownTestCase, "TearDownTestCase()"); + + repeater->OnTestCaseEnd(*this); + impl->set_current_test_case(NULL); +} + +// Clears the results of all tests in this test case. +void TestCase::ClearResult() { + ForEach(test_info_list_, TestInfo::ClearTestResult); +} + +// Shuffles the tests in this test case. +void TestCase::ShuffleTests(internal::Random* random) { + Shuffle(random, &test_indices_); +} + +// Restores the test order to before the first shuffle. +void TestCase::UnshuffleTests() { + for (size_t i = 0; i < test_indices_.size(); i++) { + test_indices_[i] = static_cast(i); + } +} + +// Formats a countable noun. Depending on its quantity, either the +// singular form or the plural form is used. e.g. +// +// FormatCountableNoun(1, "formula", "formuli") returns "1 formula". +// FormatCountableNoun(5, "book", "books") returns "5 books". +static internal::String FormatCountableNoun(int count, + const char * singular_form, + const char * plural_form) { + return internal::String::Format("%d %s", count, + count == 1 ? singular_form : plural_form); +} + +// Formats the count of tests. +static internal::String FormatTestCount(int test_count) { + return FormatCountableNoun(test_count, "test", "tests"); +} + +// Formats the count of test cases. +static internal::String FormatTestCaseCount(int test_case_count) { + return FormatCountableNoun(test_case_count, "test case", "test cases"); +} + +// Converts a TestPartResult::Type enum to human-friendly string +// representation. 
Both kNonFatalFailure and kFatalFailure are translated +// to "Failure", as the user usually doesn't care about the difference +// between the two when viewing the test result. +static const char * TestPartResultTypeToString(TestPartResult::Type type) { + switch (type) { + case TestPartResult::kSuccess: + return "Success"; + + case TestPartResult::kNonFatalFailure: + case TestPartResult::kFatalFailure: +#ifdef _MSC_VER + return "error: "; +#else + return "Failure\n"; +#endif + default: + return "Unknown result type"; + } +} + +// Prints a TestPartResult to a String. +static internal::String PrintTestPartResultToString( + const TestPartResult& test_part_result) { + return (Message() + << internal::FormatFileLocation(test_part_result.file_name(), + test_part_result.line_number()) + << " " << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()).GetString(); +} + +// Prints a TestPartResult. +static void PrintTestPartResult(const TestPartResult& test_part_result) { + const internal::String& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter + +namespace internal { + +enum GTestColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW +}; + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns the character attribute for the given color. +WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: return NULL; + }; +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true iff Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. 
+ const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + const bool use_color = false; +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + // The '!= 0' comparison is necessary to satisfy MSVC 7.1. + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetColorAttribute(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != NULL || value_param != NULL) { + printf(", where "); + if (type_param != NULL) { + printf("TypeParam = %s", type_param); + if (value_param != NULL) + printf(" and "); + } + if (value_param != NULL) { + printf("GetParam() = %s", value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char * test_case, const char * test) { + printf("%s.%s", test_case, test); + } + + // The following methods override what's in the TestEventListener class. 
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); + + internal::String test_case_name_; +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. + if (!internal::String::CStringEquals(filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s filter = %s\n", GTEST_NAME_, filter); + } + + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { + const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); + ColoredPrintf(COLOR_YELLOW, + "Note: This is test shard %d of %s.\n", + static_cast(shard_index) + 1, + internal::posix::GetEnv(kTestTotalShards)); + } + + if (GTEST_FLAG(shuffle)) { + ColoredPrintf(COLOR_YELLOW, + "Note: Randomizing tests' orders with a seed of %d .\n", + unit_test.random_seed()); + } + + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("Running %s from %s.\n", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment set-up.\n"); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { + test_case_name_ = test_case.name(); + const internal::String counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_case_name_.c_str()); + if (test_case.type_param() == NULL) { + printf("\n"); + } else { + printf(", where TypeParam = %s\n", test_case.type_param()); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { + ColoredPrintf(COLOR_GREEN, "[ RUN ] "); + PrintTestName(test_case_name_.c_str(), test_info.name()); + printf("\n"); + fflush(stdout); +} + +// Called after an assertion failure. +void PrettyUnitTestResultPrinter::OnTestPartResult( + const TestPartResult& result) { + // If the test part succeeded, we don't need to do anything. + if (result.type() == TestPartResult::kSuccess) + return; + + // Print failure message from the assertion (e.g. expected this and got that). 
+ PrintTestPartResult(result); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { + if (test_info.result()->Passed()) { + ColoredPrintf(COLOR_GREEN, "[ OK ] "); + } else { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + } + PrintTestName(test_case_name_.c_str(), test_info.name()); + if (test_info.result()->Failed()) + PrintFullTestCommentIfPresent(test_info); + + if (GTEST_FLAG(print_time)) { + printf(" (%s ms)\n", internal::StreamableToString( + test_info.result()->elapsed_time()).c_str()); + } else { + printf("\n"); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { + if (!GTEST_FLAG(print_time)) return; + + test_case_name_ = test_case.name(); + const internal::String counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", + counts.c_str(), test_case_name_.c_str(), + internal::StreamableToString(test_case.elapsed_time()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment tear-down\n"); + fflush(stdout); +} + +// Internal helper for printing the list of failed tests. +void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) { + const int failed_test_count = unit_test.failed_test_count(); + if (failed_test_count == 0) { + return; + } + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase& test_case = *unit_test.GetTestCase(i); + if (!test_case.should_run() || (test_case.failed_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_case.total_test_count(); ++j) { + const TestInfo& test_info = *test_case.GetTestInfo(j); + if (!test_info.should_run() || test_info.result()->Passed()) { + continue; + } + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s.%s", test_case.name(), test_info.name()); + PrintFullTestCommentIfPresent(test_info); + printf("\n"); + } + } +} + +void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("%s from %s ran.", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + if (GTEST_FLAG(print_time)) { + printf(" (%s ms total)", + internal::StreamableToString(unit_test.elapsed_time()).c_str()); + } + printf("\n"); + ColoredPrintf(COLOR_GREEN, "[ PASSED ] "); + printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str()); + + int num_failures = unit_test.failed_test_count(); + if (!unit_test.Passed()) { + const int failed_test_count = unit_test.failed_test_count(); + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str()); + PrintFailedTests(unit_test); + printf("\n%2d FAILED %s\n", num_failures, + num_failures == 1 ? "TEST" : "TESTS"); + } + + int num_disabled = unit_test.disabled_test_count(); + if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { + if (!num_failures) { + printf("\n"); // Add a spacer if no FAILURE banner is displayed. + } + ColoredPrintf(COLOR_YELLOW, + " YOU HAVE %d DISABLED %s\n\n", + num_disabled, + num_disabled == 1 ? "TEST" : "TESTS"); + } + // Ensure that Google Test output is printed before, e.g., heapchecker output. 
+ fflush(stdout); +} + +// End PrettyUnitTestResultPrinter + +// class TestEventRepeater +// +// This class forwards events to other event listeners. +class TestEventRepeater : public TestEventListener { + public: + TestEventRepeater() : forwarding_enabled_(true) {} + virtual ~TestEventRepeater(); + void Append(TestEventListener *listener); + TestEventListener* Release(TestEventListener* listener); + + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled() const { return forwarding_enabled_; } + void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; } + + virtual void OnTestProgramStart(const UnitTest& unit_test); + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test); + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test); + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& unit_test); + + private: + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled_; + // The list of listeners that receive events. + std::vector listeners_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater); +}; + +TestEventRepeater::~TestEventRepeater() { + ForEach(listeners_, Delete); +} + +void TestEventRepeater::Append(TestEventListener *listener) { + listeners_.push_back(listener); +} + +// TODO(vladl@google.com): Factor the search functionality into Vector::Find. +TestEventListener* TestEventRepeater::Release(TestEventListener *listener) { + for (size_t i = 0; i < listeners_.size(); ++i) { + if (listeners_[i] == listener) { + listeners_.erase(listeners_.begin() + i); + return listener; + } + } + + return NULL; +} + +// Since most methods are very similar, use macros to reduce boilerplate. +// This defines a member that forwards the call to all listeners. +#define GTEST_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (size_t i = 0; i < listeners_.size(); i++) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} +// This defines a member that forwards the call to all listeners in reverse +// order. 
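+// (Note the asymmetry: *Start events are forwarded to the listeners
+// first-to-last, while the matching *End events are forwarded last-to-first,
+// so a listener added later observes events nested inside those of listeners
+// added earlier.)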
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} + +GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest) +GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest) +GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase) +GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) +GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult) +GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo) +GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase) +GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest) + +#undef GTEST_REPEATER_METHOD_ +#undef GTEST_REVERSE_REPEATER_METHOD_ + +void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (size_t i = 0; i < listeners_.size(); i++) { + listeners_[i]->OnTestIterationStart(unit_test, iteration); + } + } +} + +void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { + listeners_[i]->OnTestIterationEnd(unit_test, iteration); + } + } +} + +// End TestEventRepeater + +// This class generates an XML output file. +class XmlUnitTestResultPrinter : public EmptyTestEventListener { + public: + explicit XmlUnitTestResultPrinter(const char* output_file); + + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + + private: + // Is c a whitespace character that is normalized to a space character + // when it appears in an XML attribute value? + static bool IsNormalizableWhitespace(char c) { + return c == 0x9 || c == 0xA || c == 0xD; + } + + // May c appear in a well-formed XML document? + static bool IsValidXmlCharacter(char c) { + return IsNormalizableWhitespace(c) || c >= 0x20; + } + + // Returns an XML-escaped copy of the input string str. If + // is_attribute is true, the text is meant to appear as an attribute + // value, and normalizable whitespace is preserved by replacing it + // with character references. + static String EscapeXml(const char* str, bool is_attribute); + + // Returns the given string with all characters invalid in XML removed. + static string RemoveInvalidXmlCharacters(const string& str); + + // Convenience wrapper around EscapeXml when str is an attribute value. + static String EscapeXmlAttribute(const char* str) { + return EscapeXml(str, true); + } + + // Convenience wrapper around EscapeXml when str is not an attribute value. + static String EscapeXmlText(const char* str) { return EscapeXml(str, false); } + + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. + static void OutputXmlCDataSection(::std::ostream* stream, const char* data); + + // Streams an XML representation of a TestInfo object. + static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestCase object + static void PrintXmlTestCase(FILE* out, const TestCase& test_case); + + // Prints an XML summary of unit_test to output stream out. 
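+  // Roughly, the emitted document has this shape (illustrative values only;
+  // the exact attribute set depends on the flags in effect):
+  //
+  //   <?xml version="1.0" encoding="UTF-8"?>
+  //   <testsuites tests="3" failures="1" ... name="AllTests">
+  //     <testsuite name="FooTest" tests="2" ...>
+  //       <testcase name="DoesBar" status="run" time="0.012" classname="FooTest">
+  //         <failure message="..."><![CDATA[...]]></failure>
+  //       </testcase>
+  //     </testsuite>
+  //   </testsuites>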
+ static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as space + // delimited XML attributes based on the property key="value" pairs. + // When the String is not empty, it includes a space at the beginning, + // to delimit this attribute from prior attributes. + static String TestPropertiesAsXmlAttributes(const TestResult& result); + + // The output file. + const String output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); +}; + +// Creates a new XmlUnitTestResultPrinter. +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.c_str() == NULL || output_file_.empty()) { + fprintf(stderr, "XML output file may not be null\n"); + fflush(stderr); + exit(EXIT_FAILURE); + } +} + +// Called after the unit test ends. +void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* xmlout = NULL; + FilePath output_file(output_file_); + FilePath output_dir(output_file.RemoveFileName()); + + if (output_dir.CreateDirectoriesRecursively()) { + xmlout = posix::FOpen(output_file_.c_str(), "w"); + } + if (xmlout == NULL) { + // TODO(wan): report the reason of the failure. + // + // We don't do it for now as: + // + // 1. There is no urgent need for it. + // 2. It's a bit involved to make the errno variable thread-safe on + // all three operating systems (Linux, Windows, and Mac OS). + // 3. To interpret the meaning of errno in a thread-safe way, + // we need the strerror_r() function, which is not available on + // Windows. + fprintf(stderr, + "Unable to open file \"%s\"\n", + output_file_.c_str()); + fflush(stderr); + exit(EXIT_FAILURE); + } + PrintXmlUnitTest(xmlout, unit_test); + fclose(xmlout); +} + +// Returns an XML-escaped copy of the input string str. If is_attribute +// is true, the text is meant to appear as an attribute value, and +// normalizable whitespace is preserved by replacing it with character +// references. +// +// Invalid XML characters in str, if any, are stripped from the output. +// It is expected that most, if not all, of the text processed by this +// module will consist of ordinary English text. +// If this module is ever modified to produce version 1.1 XML output, +// most invalid characters can be retained using character references. +// TODO(wan): It might be nice to have a minimally invasive, human-readable +// escaping scheme for invalid characters, rather than dropping them. +String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) { + Message m; + + if (str != NULL) { + for (const char* src = str; *src; ++src) { + switch (*src) { + case '<': + m << "<"; + break; + case '>': + m << ">"; + break; + case '&': + m << "&"; + break; + case '\'': + if (is_attribute) + m << "'"; + else + m << '\''; + break; + case '"': + if (is_attribute) + m << """; + else + m << '"'; + break; + default: + if (IsValidXmlCharacter(*src)) { + if (is_attribute && IsNormalizableWhitespace(*src)) + m << String::Format("&#x%02X;", unsigned(*src)); + else + m << *src; + } + break; + } + } + } + + return m.GetString(); +} + +// Returns the given string with all characters invalid in XML removed. +// Currently invalid characters are dropped from the string. An +// alternative is to replace them with certain characters such as . or ?. 
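+// For example, a failure message containing the terminal escape byte '\x1B'
+// has that byte silently dropped here: XML 1.0 cannot represent it even as a
+// character reference.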
+string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const string& str) {
+  string output;
+  output.reserve(str.size());
+  for (string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << ms/1000.0;
+  return ss.str();
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != NULL) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_case_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  *stream << "    <testcase name=\""
+          << EscapeXmlAttribute(test_info.name()).c_str() << "\"";
+
+  if (test_info.value_param() != NULL) {
+    *stream << " value_param=\""
+            << EscapeXmlAttribute(test_info.value_param()).c_str() << "\"";
+  }
+  if (test_info.type_param() != NULL) {
+    *stream << " type_param=\""
+            << EscapeXmlAttribute(test_info.type_param()).c_str() << "\"";
+  }
+
+  *stream << " status=\""
+          << (test_info.should_run() ? "run" : "notrun")
+          << "\" time=\""
+          << FormatTimeInMillisAsSeconds(result.elapsed_time())
+          << "\" classname=\"" << EscapeXmlAttribute(test_case_name).c_str()
+          << "\"" << TestPropertiesAsXmlAttributes(result).c_str();
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1)
+        *stream << ">\n";
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(part.summary()).c_str()
+              << "\" type=\"\">";
+      const string location = internal::FormatCompilerIndependentFileLocation(
+          part.file_name(), part.line_number());
+      const string message = location + "\n" + part.message();
+      OutputXmlCDataSection(stream,
+                            RemoveInvalidXmlCharacters(message).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0)
+    *stream << " />\n";
+  else
+    *stream << "    </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
+                                                const TestCase& test_case) {
+  fprintf(out,
+          "  <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
+          "disabled=\"%d\" ",
+          EscapeXmlAttribute(test_case.name()).c_str(),
+          test_case.total_test_count(),
+          test_case.failed_test_count(),
+          test_case.disabled_test_count());
+  fprintf(out,
+          "errors=\"0\" time=\"%s\">\n",
+          FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str());
+  for (int i = 0; i < test_case.total_test_count(); ++i) {
+    ::std::stringstream stream;
+    OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i));
+    fprintf(out, "%s", StringStreamToString(&stream).c_str());
+  }
+  fprintf(out, "  </testsuite>\n");
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
+                                                const UnitTest& unit_test) {
+  fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+  fprintf(out,
+          "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
+          "errors=\"0\" time=\"%s\" ",
+          unit_test.total_test_count(),
+          unit_test.failed_test_count(),
+          unit_test.disabled_test_count(),
+          FormatTimeInMillisAsSeconds(unit_test.elapsed_time()).c_str());
+  if (GTEST_FLAG(shuffle)) {
+    fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed());
+  }
+  fprintf(out, "name=\"AllTests\">\n");
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i)
+    PrintXmlTestCase(out, *unit_test.GetTestCase(i));
+  fprintf(out, "</testsuites>\n");
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+    const TestResult& result) {
+  Message attributes;
+  for (int i = 0; i < result.test_property_count(); ++i) {
+    const TestProperty& property = result.GetTestProperty(i);
+    attributes << " " << property.key() << "="
+               << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+  }
+  return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
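+// The listener is enabled by running the test binary with
+// --gtest_stream_result_to=HOST:PORT (see ConfigureStreamingOutput() further
+// down).  Each event is sent as one newline-terminated record of
+// '&'-separated key=value pairs, for example (illustrative values only):
+//
+//   event=TestCaseStart&name=FooTest
+//   event=TestEnd&passed=1&elapsed_time=3ms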
+class StreamingListener : public EmptyTestEventListener { + public: + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + Send("gtest_streaming_protocol_version=1.0\n"); + } + + virtual ~StreamingListener() { + if (sockfd_ != -1) + CloseConnection(); + } + + void OnTestProgramStart(const UnitTest& /* unit_test */) { + Send("event=TestProgramStart\n"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) { + // Note that Google Test current only report elapsed time for each + // test iteration, not for the entire test program. + Send(String::Format("event=TestProgramEnd&passed=%d\n", + unit_test.Passed())); + + // Notify the streaming server to stop. + CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { + Send(String::Format("event=TestIterationStart&iteration=%d\n", + iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { + Send(String::Format("event=TestIterationEnd&passed=%d&elapsed_time=%sms\n", + unit_test.Passed(), + StreamableToString(unit_test.elapsed_time()).c_str())); + } + + void OnTestCaseStart(const TestCase& test_case) { + Send(String::Format("event=TestCaseStart&name=%s\n", test_case.name())); + } + + void OnTestCaseEnd(const TestCase& test_case) { + Send(String::Format("event=TestCaseEnd&passed=%d&elapsed_time=%sms\n", + test_case.Passed(), + StreamableToString(test_case.elapsed_time()).c_str())); + } + + void OnTestStart(const TestInfo& test_info) { + Send(String::Format("event=TestStart&name=%s\n", test_info.name())); + } + + void OnTestEnd(const TestInfo& test_info) { + Send(String::Format( + "event=TestEnd&passed=%d&elapsed_time=%sms\n", + (test_info.result())->Passed(), + StreamableToString((test_info.result())->elapsed_time()).c_str())); + } + + void OnTestPartResult(const TestPartResult& test_part_result) { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + Send(String::Format("event=TestPartResult&file=%s&line=%d&message=", + UrlEncode(file_name).c_str(), + test_part_result.line_number())); + Send(UrlEncode(test_part_result.message()) + "\n"); + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + // Sends a string to the socket. + void Send(const string& message) { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +// Checks if str contains '=', '&', '%' or '\n' characters. If yes, +// replaces them by "%xx" where xx is their hexadecimal value. For +// example, replaces "=" with "%3D". This algorithm is O(strlen(str)) +// in both time and space -- important as the input str may contain an +// arbitrarily long test failure message and stack trace. 
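+// Because '%' itself is escaped as well, the encoding below is unambiguous
+// and can be reversed by the receiving end without any extra framing.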
+string StreamingListener::UrlEncode(const char* str) { + string result; + result.reserve(strlen(str) + 1); + for (char ch = *str; ch != '\0'; ch = *++str) { + switch (ch) { + case '%': + case '=': + case '&': + case '\n': + result.append(String::Format("%%%02x", static_cast(ch))); + break; + default: + result.push_back(ch); + break; + } + } + return result; +} + +void StreamingListener::MakeConnection() { + GTEST_CHECK_(sockfd_ == -1) + << "MakeConnection() can't be called when there is already a connection."; + + addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. + hints.ai_socktype = SOCK_STREAM; + addrinfo* servinfo = NULL; + + // Use the getaddrinfo() to get a linked list of IP addresses for + // the given host name. + const int error_num = getaddrinfo( + host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); + if (error_num != 0) { + GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: " + << gai_strerror(error_num); + } + + // Loop through all the results and connect to the first we can. + for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL; + cur_addr = cur_addr->ai_next) { + sockfd_ = socket( + cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + if (sockfd_ != -1) { + // Connect the client socket to the server socket. + if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { + close(sockfd_); + sockfd_ = -1; + } + } + } + + freeaddrinfo(servinfo); // all done with this structure + + if (sockfd_ == -1) { + GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to " + << host_name_ << ":" << port_num_; + } +} + +// End of class Streaming Listener +#endif // GTEST_CAN_STREAM_RESULTS__ + +// Class ScopedTrace + +// Pushes the given source file location and message onto a per-thread +// trace stack maintained by Google Test. +// L < UnitTest::mutex_ +ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) { + TraceInfo trace; + trace.file = file; + trace.line = line; + trace.message = message.GetString(); + + UnitTest::GetInstance()->PushGTestTrace(trace); +} + +// Pops the info pushed by the c'tor. +// L < UnitTest::mutex_ +ScopedTrace::~ScopedTrace() { + UnitTest::GetInstance()->PopGTestTrace(); +} + + +// class OsStackTraceGetter + +// Returns the current OS stack trace as a String. Parameters: +// +// max_depth - the maximum number of stack frames to be included +// in the trace. +// skip_count - the number of top frames to be skipped; doesn't count +// against max_depth. +// +// L < mutex_ +// We use "L < mutex_" to denote that the function may acquire mutex_. +String OsStackTraceGetter::CurrentStackTrace(int, int) { + return String(""); +} + +// L < mutex_ +void OsStackTraceGetter::UponLeavingGTest() { +} + +const char* const +OsStackTraceGetter::kElidedFramesMarker = + "... " GTEST_NAME_ " internal frames ..."; + +} // namespace internal + +// class TestEventListeners + +TestEventListeners::TestEventListeners() + : repeater_(new internal::TestEventRepeater()), + default_result_printer_(NULL), + default_xml_generator_(NULL) { +} + +TestEventListeners::~TestEventListeners() { delete repeater_; } + +// Returns the standard listener responsible for the default console +// output. Can be removed from the listeners list to shut down default +// console output. Note that removing this object from the listener list +// with Release transfers its ownership to the user. 
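+// A typical use of this API from user code (sketch only; MyListener is a
+// hypothetical TestEventListener subclass):
+//
+//   testing::TestEventListeners& listeners =
+//       testing::UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+//   listeners.Append(new MyListener);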
+void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. +TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = NULL; + else if (listener == default_xml_generator_) + default_xml_generator_ = NULL; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. +bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest * UnitTest::GetInstance() { + // When compiled with MSVC 7.1 in optimized mode, destroying the + // UnitTest object upon exiting the program messes up the exit code, + // causing successful tests to appear failed. We have to use a + // different implementation in this case to bypass the compiler bug. + // This implementation makes the compiler happy, at the cost of + // leaking the UnitTest object. + + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. 
+ +#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) +} + +// Gets the number of successful test cases. +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_case_count(); +} + +// Gets the number of failed test cases. +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_case_count(); +} + +// Gets the number of all test cases. +int UnitTest::total_test_case_count() const { + return impl()->total_test_case_count(); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTest::test_case_to_run_count() const { + return impl()->test_case_to_run_count(); +} + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true iff the unit test passed (i.e. all test cases passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true iff the unit test failed (i.e. some test case failed +// or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +TestCase* UnitTest::GetMutableTestCase(int i) { + return impl()->GetMutableTestCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. When a test +// program is run, all global test environments will be set-up in the +// order they were registered. After all tests in the program have +// finished, all global test environments will be torn-down in the +// *reverse* order they were registered. +// +// The UnitTest object takes ownership of the given environment. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +Environment* UnitTest::AddEnvironment(Environment* env) { + if (env == NULL) { + return NULL; + } + + impl_->environments().push_back(env); + return env; +} + +// Adds a TestPartResult to the current TestResult object. All Google Test +// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call +// this to report their results. The user code should use the +// assertion macros instead of calling this directly. 
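+// For instance, a failing EXPECT_EQ in a test body ends up here (via the
+// internal AssertHelper machinery) roughly as
+//
+//   AddTestPartResult(TestPartResult::kNonFatalFailure,  // EXPECT_* = non-fatal
+//                     __FILE__, __LINE__, failure_message, os_stack_trace);
+//
+// whereas a failing ASSERT_* reports kFatalFailure instead.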
+// L < mutex_ +void UnitTest::AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const internal::String& message, + const internal::String& os_stack_trace) { + Message msg; + msg << message; + + internal::MutexLock lock(&mutex_); + if (impl_->gtest_trace_stack().size() > 0) { + msg << "\n" << GTEST_NAME_ << " trace:"; + + for (int i = static_cast(impl_->gtest_trace_stack().size()); + i > 0; --i) { + const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1]; + msg << "\n" << internal::FormatFileLocation(trace.file, trace.line) + << " " << trace.message; + } + } + + if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) { + msg << internal::kStackTraceMarker << os_stack_trace; + } + + const TestPartResult result = + TestPartResult(result_type, file_name, line_number, + msg.GetString().c_str()); + impl_->GetTestPartResultReporterForCurrentThread()-> + ReportTestPartResult(result); + + if (result_type != TestPartResult::kSuccess) { + // gtest_break_on_failure takes precedence over + // gtest_throw_on_failure. This allows a user to set the latter + // in the code (perhaps in order to use Google Test assertions + // with another testing framework) and specify the former on the + // command line for debugging. + if (GTEST_FLAG(break_on_failure)) { +#if GTEST_OS_WINDOWS + // Using DebugBreak on Windows allows gtest to still break into a debugger + // when a failure happens and both the --gtest_break_on_failure and + // the --gtest_catch_exceptions flags are specified. + DebugBreak(); +#else + // Dereference NULL through a volatile pointer to prevent the compiler + // from removing. We use this rather than abort() or __builtin_trap() for + // portability: Symbian doesn't implement abort() well, and some debuggers + // don't correctly trap abort(). + *static_cast(NULL) = 1; +#endif // GTEST_OS_WINDOWS + } else if (GTEST_FLAG(throw_on_failure)) { +#if GTEST_HAS_EXCEPTIONS + throw GoogleTestFailureException(result); +#else + // We cannot call abort() as it generates a pop-up in debug mode + // that cannot be suppressed in VC 7.1 or below. + exit(1); +#endif + } + } +} + +// Creates and adds a property to the current TestResult. If a property matching +// the supplied value already exists, updates its value instead. +void UnitTest::RecordPropertyForCurrentTest(const char* key, + const char* value) { + const TestProperty test_property(key, value); + impl_->current_test_result()->RecordProperty(test_property); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +int UnitTest::Run() { + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_HAS_SEH + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { + +# if !GTEST_OS_WINDOWS_MOBILE + // SetErrorMode doesn't exist on CE. 
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + // + // VC++ doesn't define _set_abort_behavior() prior to the version 8.0. + // Users of prior VC versions shall suffer the agony and pain of + // clicking through the countless debug dialogs. + // TODO(vladl@google.com): find a way to suppress the abort dialog() in the + // debug mode when compiled with VC 7.1 or lower. + if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. +# endif + + } +#endif // GTEST_HAS_SEH + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestCase object for the test that's currently running, +// or NULL if no test is running. +// L < mutex_ +const TestCase* UnitTest::current_test_case() const { + internal::MutexLock lock(&mutex_); + return impl_->current_test_case(); +} + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +// L < mutex_ +const TestInfo* UnitTest::current_test_info() const { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +#if GTEST_HAS_PARAM_TEST +// Returns ParameterizedTestCaseRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +// L < mutex_ +internal::ParameterizedTestCaseRegistry& + UnitTest::parameterized_test_registry() { + return impl_->parameterized_test_registry(); +} +#endif // GTEST_HAS_PARAM_TEST + +// Creates an empty UnitTest. +UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +// L < mutex_ +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +// L < mutex_ +void UnitTest::PopGTestTrace() { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. 
+# pragma warning(disable:4355) // Temporarily disables warning 4355 + // (using this in initializer). + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +# pragma warning(pop) // Restores the warning state again. +#else + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +#endif // _MSC_VER + global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), +#if GTEST_HAS_PARAM_TEST + parameterized_test_registry_(), + parameterized_tests_registered_(false), +#endif // GTEST_HAS_PARAM_TEST + last_death_test_case_(-1), + current_test_case_(NULL), + current_test_info_(NULL), + ad_hoc_test_result_(), + os_stack_trace_getter_(NULL), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. + elapsed_time_(0), +#if GTEST_HAS_DEATH_TEST + internal_run_death_test_flag_(NULL), + death_test_factory_(new DefaultDeathTestFactory), +#endif + // Will be overridden by the flag before first use. + catch_exceptions_(false) { + listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter); +} + +UnitTestImpl::~UnitTestImpl() { + // Deletes every TestCase. + ForEach(test_cases_, internal::Delete); + + // Deletes every Environment. + ForEach(environments_, internal::Delete); + + delete os_stack_trace_getter_; +} + +#if GTEST_HAS_DEATH_TEST +// Disables event forwarding if the control is currently in a death test +// subprocess. Must not be called before InitGoogleTest. +void UnitTestImpl::SuppressTestEventsIfInSubprocess() { + if (internal_run_death_test_flag_.get() != NULL) + listeners()->SuppressEventForwarding(); +} +#endif // GTEST_HAS_DEATH_TEST + +// Initializes event listeners performing XML output as specified by +// UnitTestOptions. Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureXmlOutput() { + const String& output_format = UnitTestOptions::GetOutputFormat(); + if (output_format == "xml") { + listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter( + UnitTestOptions::GetAbsolutePathToOutputFile().c_str())); + } else if (output_format != "") { + printf("WARNING: unrecognized output format \"%s\" ignored.\n", + output_format.c_str()); + fflush(stdout); + } +} + +#if GTEST_CAN_STREAM_RESULTS_ +// Initializes event listeners for streaming test results in String form. +// Must not be called before InitGoogleTest. +void UnitTestImpl::ConfigureStreamingOutput() { + const string& target = GTEST_FLAG(stream_result_to); + if (!target.empty()) { + const size_t pos = target.find(':'); + if (pos != string::npos) { + listeners()->Append(new StreamingListener(target.substr(0, pos), + target.substr(pos+1))); + } else { + printf("WARNING: unrecognized streaming target \"%s\" ignored.\n", + target.c_str()); + fflush(stdout); + } + } +} +#endif // GTEST_CAN_STREAM_RESULTS_ + +// Performs initialization dependent upon flag values obtained in +// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to +// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest +// this function is also called from RunAllTests. Since this function can be +// called more than once, it has to be idempotent. +void UnitTestImpl::PostFlagParsingInit() { + // Ensures that this function does not execute more than once. 
+  if (!post_flag_parse_init_performed_) {
+    post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+    InitDeathTestSubprocessControlInfo();
+    SuppressTestEventsIfInSubprocess();
+#endif  // GTEST_HAS_DEATH_TEST
+
+    // Registers parameterized tests. This makes parameterized tests
+    // available to the UnitTest reflection API without running
+    // RUN_ALL_TESTS.
+    RegisterParameterizedTests();
+
+    // Configures listeners for XML output. This makes it possible for users
+    // to shut down the default XML output before invoking RUN_ALL_TESTS.
+    ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
+  }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+  // Constructor.
+  explicit TestCaseNameIs(const String& name)
+      : name_(name) {}
+
+  // Returns true iff the name of test_case matches name_.
+  bool operator()(const TestCase* test_case) const {
+    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+  }
+
+ private:
+  String name_;
+};
+
+// Finds and returns a TestCase with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+//   test_case_name: name of the test case
+//   type_param:     the name of the test case's type parameter, or NULL if
+//                   this is not a typed or a type-parameterized test case.
+//   set_up_tc:      pointer to the function that sets up the test case
+//   tear_down_tc:   pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+                                    const char* type_param,
+                                    Test::SetUpTestCaseFunc set_up_tc,
+                                    Test::TearDownTestCaseFunc tear_down_tc) {
+  // Can we find a TestCase with the given name?
+  const std::vector<TestCase*>::const_iterator test_case =
+      std::find_if(test_cases_.begin(), test_cases_.end(),
+                   TestCaseNameIs(test_case_name));
+
+  if (test_case != test_cases_.end())
+    return *test_case;
+
+  // No. Let's create one.
+  TestCase* const new_test_case =
+      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+  // Is this a death test case?
+  if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
+                                               kDeathTestCaseFilter)) {
+    // Yes. Inserts the test case after the last death test case
+    // defined so far. This only works when the test cases haven't
+    // been shuffled. Otherwise we may end up running a death test
+    // after a non-death test.
+    ++last_death_test_case_;
+    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+                       new_test_case);
+  } else {
+    // No. Appends to the end of the list.
+    test_cases_.push_back(new_test_case);
+  }
+
+  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+  return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful.
If any exception is +// thrown during a test, the test is considered to be failed, but the +// rest of the tests will still be run. +// +// When parameterized tests are enabled, it expands and registers +// parameterized tests first in RegisterParameterizedTests(). +// All other functions called from RunAllTests() may safely assume that +// parameterized tests are ready to be counted and run. +bool UnitTestImpl::RunAllTests() { + // Makes sure InitGoogleTest() was called. + if (!GTestIsInitialized()) { + printf("%s", + "\nThis test program did NOT call ::testing::InitGoogleTest " + "before calling RUN_ALL_TESTS(). Please fix it.\n"); + return false; + } + + // Do not run any test if the --help flag was specified. + if (g_help_flag) + return true; + + // Repeats the call to the post-flag parsing initialization in case the + // user didn't call InitGoogleTest. + PostFlagParsingInit(); + + // Even if sharding is not on, test runners may want to use the + // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding + // protocol. + internal::WriteToShardStatusFileIfNeeded(); + + // True iff we are in a subprocess for running a thread-safe-style + // death test. + bool in_subprocess_for_death_test = false; + +#if GTEST_HAS_DEATH_TEST + in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL); +#endif // GTEST_HAS_DEATH_TEST + + const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, + in_subprocess_for_death_test); + + // Compares the full test names with the filter to decide which + // tests to run. + const bool has_tests_to_run = FilterTests(should_shard + ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; + + // Lists the tests and exits if the --gtest_list_tests flag was specified. + if (GTEST_FLAG(list_tests)) { + // This must be called *after* FilterTests() has been called. + ListTestsMatchingFilter(); + return true; + } + + random_seed_ = GTEST_FLAG(shuffle) ? + GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0; + + // True iff at least one test has failed. + bool failed = false; + + TestEventListener* repeater = listeners()->repeater(); + + repeater->OnTestProgramStart(*parent_); + + // How many times to repeat the tests? We don't want to repeat them + // when we are inside the subprocess of a death test. + const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat); + // Repeats forever if the repeat count is negative. + const bool forever = repeat < 0; + for (int i = 0; forever || i != repeat; i++) { + // We want to preserve failures generated by ad-hoc test + // assertions executed before RUN_ALL_TESTS(). + ClearNonAdHocTestResult(); + + const TimeInMillis start = GetTimeInMillis(); + + // Shuffles test cases and tests if requested. + if (has_tests_to_run && GTEST_FLAG(shuffle)) { + random()->Reseed(random_seed_); + // This should be done before calling OnTestIterationStart(), + // such that a test event listener can see the actual test order + // in the event. + ShuffleTests(); + } + + // Tells the unit test event listeners that the tests are about to start. + repeater->OnTestIterationStart(*parent_, i); + + // Runs each test case if there is at least one test to run. + if (has_tests_to_run) { + // Sets up all environments beforehand. + repeater->OnEnvironmentsSetUpStart(*parent_); + ForEach(environments_, SetUpEnvironment); + repeater->OnEnvironmentsSetUpEnd(*parent_); + + // Runs the tests only if there was no fatal failure during global + // set-up. 
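+      // Illustrative aside (editor's note, not part of the upstream Google
+      // Test sources): the environments set up above are those a test
+      // program registers before RUN_ALL_TESTS(), for example:
+      //
+      //   class MyEnv : public ::testing::Environment {  // hypothetical name
+      //     virtual void SetUp()    { /* acquire global resources */ }
+      //     virtual void TearDown() { /* release them */ }
+      //   };
+      //   ::testing::AddGlobalTestEnvironment(new MyEnv);  // gtest owns it
+      //
+      // A fatal failure in MyEnv::SetUp() is what the check below guards
+      // against: the test cases are skipped, while TearDown() still runs.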
+ if (!Test::HasFatalFailure()) { + for (int test_index = 0; test_index < total_test_case_count(); + test_index++) { + GetMutableTestCase(test_index)->Run(); + } + } + + // Tears down all environments in reverse order afterwards. + repeater->OnEnvironmentsTearDownStart(*parent_); + std::for_each(environments_.rbegin(), environments_.rend(), + TearDownEnvironment); + repeater->OnEnvironmentsTearDownEnd(*parent_); + } + + elapsed_time_ = GetTimeInMillis() - start; + + // Tells the unit test event listener that the tests have just finished. + repeater->OnTestIterationEnd(*parent_, i); + + // Gets the result and clears it. + if (!Passed()) { + failed = true; + } + + // Restores the original test order after the iteration. This + // allows the user to quickly repro a failure that happens in the + // N-th iteration without repeating the first (N - 1) iterations. + // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in + // case the user somehow changes the value of the flag somewhere + // (it's always safe to unshuffle the tests). + UnshuffleTests(); + + if (GTEST_FLAG(shuffle)) { + // Picks a new random seed for each iteration. + random_seed_ = GetNextRandomSeed(random_seed_); + } + } + + repeater->OnTestProgramEnd(*parent_); + + return !failed; +} + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded() { + const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile); + if (test_shard_file != NULL) { + FILE* const file = posix::FOpen(test_shard_file, "w"); + if (file == NULL) { + ColoredPrintf(COLOR_RED, + "Could not write to the test shard status file \"%s\" " + "specified by the %s environment variable.\n", + test_shard_file, kTestShardStatusFile); + fflush(stdout); + exit(EXIT_FAILURE); + } + fclose(file); + } +} + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (i.e., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. 
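+// Illustrative example (editor's note, not part of the upstream Google Test
+// sources): the sharding protocol checked below is driven entirely by
+// environment variables, so one test binary can be split across machines
+// without recompilation, e.g.
+//
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=0 ./my_test   # machine 0
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=1 ./my_test   # machine 1
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=2 ./my_test   # machine 2
+//
+// Each invocation runs a disjoint subset of the filtered tests (see
+// ShouldRunTestOnShard() below); "my_test" is a placeholder binary name.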
+bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) { + const char* str_val = posix::GetEnv(var); + if (str_val == NULL) { + return default_val; + } + + Int32 result; + if (!ParseInt32(Message() << "The value of environment variable " << var, + str_val, &result)) { + exit(EXIT_FAILURE); + } + return result; +} + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { + return (test_id % total_shards) == shard_index; +} + +// Compares the name of each test with the user-specified filter to +// decide whether the test should be run, then records the result in +// each TestCase and TestInfo object. +// If shard_tests == true, further filters tests based on sharding +// variables in the environment - see +// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide. +// Returns the number of tests that should run. +int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { + const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestTotalShards, -1) : -1; + const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + + // num_runnable_tests are the number of tests that will + // run across all shards (i.e., match filter and are not disabled). + // num_selected_tests are the number of tests to be run on + // this shard. 
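+  // Illustrative example (editor's note, not part of the upstream Google
+  // Test sources): the filter matched below against each
+  // "TestCaseName.TestName" uses '*' (any substring), '?' (any character)
+  // and ':'-separated patterns, with an optional negative part after '-':
+  //
+  //   ./my_test --gtest_filter=FooTest.*              # all tests in FooTest
+  //   ./my_test --gtest_filter=FooTest.*-FooTest.Bar  # all but FooTest.Bar
+  //
+  // Tests whose name starts with DISABLED_ are skipped unless
+  // --gtest_also_run_disabled_tests is given, mirroring is_runnable below.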
+ int num_runnable_tests = 0; + int num_selected_tests = 0; + for (size_t i = 0; i < test_cases_.size(); i++) { + TestCase* const test_case = test_cases_[i]; + const String &test_case_name = test_case->name(); + test_case->set_should_run(false); + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + TestInfo* const test_info = test_case->test_info_list()[j]; + const String test_name(test_info->name()); + // A test is disabled if test case name or test name matches + // kDisableTestFilter. + const bool is_disabled = + internal::UnitTestOptions::MatchesFilter(test_case_name, + kDisableTestFilter) || + internal::UnitTestOptions::MatchesFilter(test_name, + kDisableTestFilter); + test_info->is_disabled_ = is_disabled; + + const bool matches_filter = + internal::UnitTestOptions::FilterMatchesTest(test_case_name, + test_name); + test_info->matches_filter_ = matches_filter; + + const bool is_runnable = + (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && + matches_filter; + + const bool is_selected = is_runnable && + (shard_tests == IGNORE_SHARDING_PROTOCOL || + ShouldRunTestOnShard(total_shards, shard_index, + num_runnable_tests)); + + num_runnable_tests += is_runnable; + num_selected_tests += is_selected; + + test_info->should_run_ = is_selected; + test_case->set_should_run(test_case->should_run() || is_selected); + } + } + return num_selected_tests; +} + +// Prints the names of the tests matching the user-specified filter flag. +void UnitTestImpl::ListTestsMatchingFilter() { + for (size_t i = 0; i < test_cases_.size(); i++) { + const TestCase* const test_case = test_cases_[i]; + bool printed_test_case_name = false; + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + const TestInfo* const test_info = + test_case->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_case_name) { + printed_test_case_name = true; + printf("%s.\n", test_case->name()); + } + printf(" %s\n", test_info->name()); + } + } + } + fflush(stdout); +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. +void UnitTestImpl::set_os_stack_trace_getter( + OsStackTraceGetterInterface* getter) { + if (os_stack_trace_getter_ != getter) { + delete os_stack_trace_getter_; + os_stack_trace_getter_ = getter; + } +} + +// Returns the current OS stack trace getter if it is not NULL; +// otherwise, creates an OsStackTraceGetter, makes it the current +// getter, and returns it. +OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() { + if (os_stack_trace_getter_ == NULL) { + os_stack_trace_getter_ = new OsStackTraceGetter; + } + + return os_stack_trace_getter_; +} + +// Returns the TestResult for the test that's currently running, or +// the TestResult for the ad hoc test if no test is running. +TestResult* UnitTestImpl::current_test_result() { + return current_test_info_ ? + &(current_test_info_->result_) : &ad_hoc_test_result_; +} + +// Shuffles all test cases, and the tests within each test case, +// making sure that death tests are still run first. +void UnitTestImpl::ShuffleTests() { + // Shuffles the death test cases. + ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_); + + // Shuffles the non-death test cases. + ShuffleRange(random(), last_death_test_case_ + 1, + static_cast(test_cases_.size()), &test_case_indices_); + + // Shuffles the tests inside each test case. 
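+  // Illustrative note (editor's addition, not part of the upstream Google
+  // Test sources): this shuffling path is exercised by
+  //
+  //   ./my_test --gtest_shuffle --gtest_random_seed=12345
+  //
+  // Re-running with the seed reported by a failing run reproduces the same
+  // order; seed 0 (the default) derives the seed from the current time.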
+ for (size_t i = 0; i < test_cases_.size(); i++) { + test_cases_[i]->ShuffleTests(random()); + } +} + +// Restores the test cases and tests to their order before the first shuffle. +void UnitTestImpl::UnshuffleTests() { + for (size_t i = 0; i < test_cases_.size(); i++) { + // Unshuffles the tests in each test case. + test_cases_[i]->UnshuffleTests(); + // Resets the index of each test case. + test_case_indices_[i] = static_cast(i); + } +} + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, + int skip_count) { + // We pass skip_count + 1 to skip this wrapper function in addition + // to what the user really wants to skip. + return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); +} + +// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to +// suppress unreachable code warnings. +namespace { +class ClassUniqueToAlwaysTrue {}; +} + +bool IsTrue(bool condition) { return condition; } + +bool AlwaysTrue() { +#if GTEST_HAS_EXCEPTIONS + // This condition is always false so AlwaysTrue() never actually throws, + // but it makes the compiler think that it may throw. + if (IsTrue(false)) + throw ClassUniqueToAlwaysTrue(); +#endif // GTEST_HAS_EXCEPTIONS + return true; +} + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +bool SkipPrefix(const char* prefix, const char** pstr) { + const size_t prefix_len = strlen(prefix); + if (strncmp(*pstr, prefix, prefix_len) == 0) { + *pstr += prefix_len; + return true; + } + return false; +} + +// Parses a string as a command line flag. The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +const char* ParseFlagValue(const char* str, + const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == NULL || flag == NULL) return NULL; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag); + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return NULL; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. 
+// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Converts the string value to a bool. + *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an Int32 flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseStringFlag(const char* str, const char* flag, String* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. +static bool HasGoogleTestFlagPrefix(const char* str) { + return (SkipPrefix("--", &str) || + SkipPrefix("-", &str) || + SkipPrefix("/", &str)) && + !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && + (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || + SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str)); +} + +// Prints a string containing code-encoded text. The following escape +// sequences can be used in the string to control the text color: +// +// @@ prints a single '@' character. +// @R changes the color to red. +// @G changes the color to green. +// @Y changes the color to yellow. +// @D changes to the default terminal text color. +// +// TODO(wan@google.com): Write tests for this once we add stdout +// capturing to Google Test. +static void PrintColorEncoded(const char* str) { + GTestColor color = COLOR_DEFAULT; // The current color. + + // Conceptually, we split the string into segments divided by escape + // sequences. Then we print one segment at a time. At the end of + // each iteration, the str pointer advances to the beginning of the + // next segment. 
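+  // Illustrative example (editor's note, not part of the upstream Google
+  // Test sources): given the escape sequences described above, a call like
+  //
+  //   PrintColorEncoded("@Gpassed@D and @Rfailed@D counts\n");
+  //
+  // prints "passed" in green, "failed" in red, and the rest in the default
+  // terminal color; "@@" would emit a literal '@'.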
+ for (;;) { + const char* p = strchr(str, '@'); + if (p == NULL) { + ColoredPrintf(color, "%s", str); + return; + } + + ColoredPrintf(color, "%s", String(str, p - str).c_str()); + + const char ch = p[1]; + str = p + 2; + if (ch == '@') { + ColoredPrintf(color, "@"); + } else if (ch == 'D') { + color = COLOR_DEFAULT; + } else if (ch == 'R') { + color = COLOR_RED; + } else if (ch == 'G') { + color = COLOR_GREEN; + } else if (ch == 'Y') { + color = COLOR_YELLOW; + } else { + --str; + } + } +} + +static const char kColorEncodedHelpMessage[] = +"This program contains tests written using " GTEST_NAME_ ". You can use the\n" +"following command line flags to control its behavior:\n" +"\n" +"Test Selection:\n" +" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n" +" List the names of all tests instead of running them. The name of\n" +" TEST(Foo, Bar) is \"Foo.Bar\".\n" +" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Run only the tests whose name matches one of the positive patterns but\n" +" none of the negative patterns. '?' matches any single character; '*'\n" +" matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" +" Run all disabled tests too.\n" +"\n" +"Test Execution:\n" +" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n" +" Run the tests repeatedly; use a negative count to repeat forever.\n" +" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n" +" Randomize tests' orders on every iteration.\n" +" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n" +" Random number seed to use for shuffling test orders (between 1 and\n" +" 99999, or 0 to use a seed based on the current time).\n" +"\n" +"Test Output:\n" +" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n" +" Enable/disable colored output. The default is @Gauto@D.\n" +" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n" +" Don't print the elapsed time of each test.\n" +" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G" + GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n" +" Generate an XML report in the given directory or with the given file\n" +" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n" +#if GTEST_CAN_STREAM_RESULTS_ +" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n" +" Stream test results to the given server.\n" +#endif // GTEST_CAN_STREAM_RESULTS_ +"\n" +"Assertion Behavior:\n" +#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n" +" Set the default death test style.\n" +#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n" +" Turn assertion failures into debugger break-points.\n" +" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n" +" Turn assertion failures into C++ exceptions.\n" +" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n" +" Do not report exceptions as test failures. Instead, allow them\n" +" to crash the program or throw a pop-up (on Windows).\n" +"\n" +"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set " + "the corresponding\n" +"environment variable of a flag (all letters in upper-case). For example, to\n" +"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_ + "color=no@D or set\n" +"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n" +"\n" +"For more information, please read the " GTEST_NAME_ " documentation at\n" +"@G" GTEST_PROJECT_URL_ "@D. 
If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+  for (int i = 1; i < *argc; i++) {
+    const String arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    using internal::ParseBoolFlag;
+    using internal::ParseInt32Flag;
+    using internal::ParseStringFlag;
+
+    // Do we see a Google Test flag?
+    if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+                      &GTEST_FLAG(also_run_disabled_tests)) ||
+        ParseBoolFlag(arg, kBreakOnFailureFlag,
+                      &GTEST_FLAG(break_on_failure)) ||
+        ParseBoolFlag(arg, kCatchExceptionsFlag,
+                      &GTEST_FLAG(catch_exceptions)) ||
+        ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+        ParseStringFlag(arg, kDeathTestStyleFlag,
+                        &GTEST_FLAG(death_test_style)) ||
+        ParseBoolFlag(arg, kDeathTestUseFork,
+                      &GTEST_FLAG(death_test_use_fork)) ||
+        ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+        ParseStringFlag(arg, kInternalRunDeathTestFlag,
+                        &GTEST_FLAG(internal_run_death_test)) ||
+        ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+        ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+        ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+        ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+        ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+        ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+        ParseInt32Flag(arg, kStackTraceDepthFlag,
+                       &GTEST_FLAG(stack_trace_depth)) ||
+        ParseStringFlag(arg, kStreamResultToFlag,
+                        &GTEST_FLAG(stream_result_to)) ||
+        ParseBoolFlag(arg, kThrowOnFailureFlag,
+                      &GTEST_FLAG(throw_on_failure))
+        ) {
+      // Yes. Shift the remainder of the argv list left by one. Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL. The following loop moves the trailing NULL element as
+      // well.
+      for (int j = i; j != *argc; j++) {
+        argv[j] = argv[j + 1];
+      }
+
+      // Decrements the argument count.
+      (*argc)--;
+
+      // We also need to decrement the iterator as we just removed
+      // an element.
+      i--;
+    } else if (arg_string == "--help" || arg_string == "-h" ||
+               arg_string == "-?" || arg_string == "/?" ||
+               HasGoogleTestFlagPrefix(arg)) {
+      // Both help flag and unrecognized Google Test flags (excluding
+      // internal ones) trigger help display.
+      g_help_flag = true;
+    }
+  }
+
+  if (g_help_flag) {
+    // We print the help here instead of in RUN_ALL_TESTS(), as the
+    // latter may not be called at all if the user is using Google
+    // Test with another testing framework.
+    PrintColorEncoded(kColorEncodedHelpMessage);
+  }
+}
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+  g_init_gtest_count++;
+
+  // We don't want to run the initialization code twice.
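+  // Illustrative usage (editor's note, not part of the upstream Google Test
+  // sources): this guard exists because a typical test program reaches
+  // InitGoogleTestImpl() exactly once, from main():
+  //
+  //   int main(int argc, char** argv) {
+  //     ::testing::InitGoogleTest(&argc, argv);  // strips --gtest_* flags
+  //     return RUN_ALL_TESTS();
+  //   }
+  //
+  // A second call is simply ignored, as documented for InitGoogleTest().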
+  if (g_init_gtest_count != 1) return;
+
+  if (*argc <= 0) return;
+
+  internal::g_executable_path = internal::StreamableToString(argv[0]);
+
+#if GTEST_HAS_DEATH_TEST
+
+  g_argvs.clear();
+  for (int i = 0; i != *argc; i++) {
+    g_argvs.push_back(StreamableToString(argv[i]));
+  }
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+  ParseGoogleTestFlagsOnly(argc, argv);
+  GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+}  // namespace internal
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+}  // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+#  include <crt_externs.h>
+# endif  // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+#  include <windows.h>
+# else
+#  include <sys/mman.h>
+#  include <sys/wait.h>
+# endif  // GTEST_OS_WINDOWS
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
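+// Illustrative usage (editor's note, not part of the upstream Google Test
+// sources): the style defined below governs how assertions such as
+//
+//   EXPECT_DEATH(DoSomethingLethal(), "expected stderr message");
+//
+// spawn their child process: "fast" forks and runs the statement directly,
+// while "threadsafe" re-executes the test binary so that only the single
+// death test runs in the child. It can be overridden per run with
+// --gtest_death_test_style=threadsafe ("DoSomethingLethal" is a placeholder).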
+static const char kDefaultDeathTestStyle[] = "fast"; + +GTEST_DEFINE_string_( + death_test_style, + internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle), + "Indicates how to run a death test in a forked child process: " + "\"threadsafe\" (child process re-executes the test binary " + "from the beginning, running only the specific death test) or " + "\"fast\" (child process runs the death test immediately " + "after forking)."); + +GTEST_DEFINE_bool_( + death_test_use_fork, + internal::BoolFromGTestEnv("death_test_use_fork", false), + "Instructs to use fork()/_exit() instead of clone() in death tests. " + "Ignored and always uses fork() on POSIX systems where clone() is not " + "implemented. Useful when running under valgrind or similar tools if " + "those do not support clone(). Valgrind 3.3.1 will just fail if " + "it sees an unsupported combination of clone() flags. " + "It is not recommended to use this flag w/o valgrind though it will " + "work in 99% of the cases. Once valgrind is fixed, this flag will " + "most likely be removed."); + +namespace internal { +GTEST_DEFINE_string_( + internal_run_death_test, "", + "Indicates the file, line number, temporal index of " + "the single death test to run, and a file descriptor to " + "which a success code may be sent, all separated by " + "colons. This flag is specified if and only if the current " + "process is a sub-process launched for running a thread-safe " + "death test. FOR INTERNAL USE ONLY."); +} // namespace internal + +#if GTEST_HAS_DEATH_TEST + +// ExitedWithCode constructor. +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { +} + +// ExitedWithCode function-call operator. +bool ExitedWithCode::operator()(int exit_status) const { +# if GTEST_OS_WINDOWS + + return exit_status == exit_code_; + +# else + + return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; + +# endif // GTEST_OS_WINDOWS +} + +# if !GTEST_OS_WINDOWS +// KilledBySignal constructor. +KilledBySignal::KilledBySignal(int signum) : signum_(signum) { +} + +// KilledBySignal function-call operator. +bool KilledBySignal::operator()(int exit_status) const { + return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; +} +# endif // !GTEST_OS_WINDOWS + +namespace internal { + +// Utilities needed for death tests. + +// Generates a textual description of a given exit code, in the format +// specified by wait(2). +static String ExitSummary(int exit_code) { + Message m; + +# if GTEST_OS_WINDOWS + + m << "Exited with exit status " << exit_code; + +# else + + if (WIFEXITED(exit_code)) { + m << "Exited with exit status " << WEXITSTATUS(exit_code); + } else if (WIFSIGNALED(exit_code)) { + m << "Terminated by signal " << WTERMSIG(exit_code); + } +# ifdef WCOREDUMP + if (WCOREDUMP(exit_code)) { + m << " (core dumped)"; + } +# endif +# endif // GTEST_OS_WINDOWS + + return m.GetString(); +} + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +bool ExitedUnsuccessfully(int exit_status) { + return !ExitedWithCode(0)(exit_status); +} + +# if !GTEST_OS_WINDOWS +// Generates a textual failure message when a death test finds more than +// one thread running, or cannot determine the number of threads, prior +// to executing the given statement. It is the responsibility of the +// caller not to pass a thread_count of 1. 
+static String DeathTestThreadWarning(size_t thread_count) { + Message msg; + msg << "Death tests use fork(), which is unsafe particularly" + << " in a threaded context. For this test, " << GTEST_NAME_ << " "; + if (thread_count == 0) + msg << "couldn't detect the number of threads."; + else + msg << "detected " << thread_count << " threads."; + return msg.GetString(); +} +# endif // !GTEST_OS_WINDOWS + +// Flag characters for reporting a death test that did not die. +static const char kDeathTestLived = 'L'; +static const char kDeathTestReturned = 'R'; +static const char kDeathTestThrew = 'T'; +static const char kDeathTestInternalError = 'I'; + +// An enumeration describing all of the possible ways that a death test can +// conclude. DIED means that the process died while executing the test +// code; LIVED means that process lived beyond the end of the test code; +// RETURNED means that the test statement attempted to execute a return +// statement, which is not allowed; THREW means that the test statement +// returned control by throwing an exception. IN_PROGRESS means the test +// has not yet concluded. +// TODO(vladl@google.com): Unify names and possibly values for +// AbortReason, DeathTestOutcome, and flag characters above. +enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; + +// Routine for aborting the program which is safe to call from an +// exec-style death test child process, in which case the error +// message is propagated back to the parent process. Otherwise, the +// message is simply printed to stderr. In either case, the program +// then exits with status 1. +void DeathTestAbort(const String& message) { + // On a POSIX system, this function may be called from a threadsafe-style + // death test child process, which operates on a very small stack. Use + // the heap for any additional non-minuscule memory requirements. + const InternalRunDeathTestFlag* const flag = + GetUnitTestImpl()->internal_run_death_test_flag(); + if (flag != NULL) { + FILE* parent = posix::FDOpen(flag->write_fd(), "w"); + fputc(kDeathTestInternalError, parent); + fprintf(parent, "%s", message.c_str()); + fflush(parent); + _exit(1); + } else { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); + posix::Abort(); + } +} + +// A replacement for CHECK that calls DeathTestAbort if the assertion +// fails. +# define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort(::testing::internal::String::Format( \ + "CHECK failed: File %s, line %d: %s", \ + __FILE__, __LINE__, #expression)); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for +// evaluating any system call that fulfills two conditions: it must return +// -1 on failure, and set errno to EINTR when it is interrupted and +// should be tried again. The macro expands to a loop that repeatedly +// evaluates the expression as long as it evaluates to -1 and sets +// errno to EINTR. If the expression evaluates to -1 but errno is +// something other than EINTR, DeathTestAbort is called. 
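+// Illustrative usage (editor's note, not part of the upstream Google Test
+// sources): wrapping a raw system call such as
+//
+//   GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+//
+// retries the close() while it fails with EINTR and aborts the death test
+// with a file/line diagnostic on any other failure, exactly as described
+// above.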
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort(::testing::internal::String::Format( \ + "CHECK failed: File %s, line %d: %s != -1", \ + __FILE__, __LINE__, #expression)); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// Returns the message describing the last system error in errno. +String GetLastErrnoDescription() { + return String(errno == 0 ? "" : posix::StrError(errno)); +} + +// This is called from a death test parent process to read a failure +// message from the death test child process and log it with the FATAL +// severity. On Windows, the message is read from a pipe handle. On other +// platforms, it is read from a file descriptor. +static void FailFromInternalError(int fd) { + Message error; + char buffer[256]; + int num_read; + + do { + while ((num_read = posix::Read(fd, buffer, 255)) > 0) { + buffer[num_read] = '\0'; + error << buffer; + } + } while (num_read == -1 && errno == EINTR); + + if (num_read == 0) { + GTEST_LOG_(FATAL) << error.GetString(); + } else { + const int last_error = errno; + GTEST_LOG_(FATAL) << "Error while reading death test internal: " + << GetLastErrnoDescription() << " [" << last_error << "]"; + } +} + +// Death test constructor. Increments the running death test count +// for the current test. +DeathTest::DeathTest() { + TestInfo* const info = GetUnitTestImpl()->current_test_info(); + if (info == NULL) { + DeathTestAbort("Cannot run a death test outside of a TEST or " + "TEST_F construct"); + } +} + +// Creates and returns a death test by dispatching to the current +// death test factory. +bool DeathTest::Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) { + return GetUnitTestImpl()->death_test_factory()->Create( + statement, regex, file, line, test); +} + +const char* DeathTest::LastMessage() { + return last_death_test_message_.c_str(); +} + +void DeathTest::set_last_death_test_message(const String& message) { + last_death_test_message_ = message; +} + +String DeathTest::last_death_test_message_; + +// Provides cross platform implementation for some death functionality. +class DeathTestImpl : public DeathTest { + protected: + DeathTestImpl(const char* a_statement, const RE* a_regex) + : statement_(a_statement), + regex_(a_regex), + spawned_(false), + status_(-1), + outcome_(IN_PROGRESS), + read_fd_(-1), + write_fd_(-1) {} + + // read_fd_ is expected to be closed and cleared by a derived class. + ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } + + void Abort(AbortReason reason); + virtual bool Passed(bool status_ok); + + const char* statement() const { return statement_; } + const RE* regex() const { return regex_; } + bool spawned() const { return spawned_; } + void set_spawned(bool is_spawned) { spawned_ = is_spawned; } + int status() const { return status_; } + void set_status(int a_status) { status_ = a_status; } + DeathTestOutcome outcome() const { return outcome_; } + void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; } + int read_fd() const { return read_fd_; } + void set_read_fd(int fd) { read_fd_ = fd; } + int write_fd() const { return write_fd_; } + void set_write_fd(int fd) { write_fd_ = fd; } + + // Called in the parent process only. Reads the result code of the death + // test child process via a pipe, interprets it to set the outcome_ + // member, and closes read_fd_. 
Outputs diagnostics and terminates in + // case of unexpected codes. + void ReadAndInterpretStatusByte(); + + private: + // The textual content of the code this object is testing. This class + // doesn't own this string and should not attempt to delete it. + const char* const statement_; + // The regular expression which test output must match. DeathTestImpl + // doesn't own this object and should not attempt to delete it. + const RE* const regex_; + // True if the death test child process has been successfully spawned. + bool spawned_; + // The exit status of the child process. + int status_; + // How the death test concluded. + DeathTestOutcome outcome_; + // Descriptor to the read end of the pipe to the child process. It is + // always -1 in the child process. The child keeps its write end of the + // pipe in write_fd_. + int read_fd_; + // Descriptor to the child's write end of the pipe to the parent process. + // It is always -1 in the parent process. The parent keeps its end of the + // pipe in read_fd_. + int write_fd_; +}; + +// Called in the parent process only. Reads the result code of the death +// test child process via a pipe, interprets it to set the outcome_ +// member, and closes read_fd_. Outputs diagnostics and terminates in +// case of unexpected codes. +void DeathTestImpl::ReadAndInterpretStatusByte() { + char flag; + int bytes_read; + + // The read() here blocks until data is available (signifying the + // failure of the death test) or until the pipe is closed (signifying + // its success), so it's okay to call this in the parent before + // the child process has exited. + do { + bytes_read = posix::Read(read_fd(), &flag, 1); + } while (bytes_read == -1 && errno == EINTR); + + if (bytes_read == 0) { + set_outcome(DIED); + } else if (bytes_read == 1) { + switch (flag) { + case kDeathTestReturned: + set_outcome(RETURNED); + break; + case kDeathTestThrew: + set_outcome(THREW); + break; + case kDeathTestLived: + set_outcome(LIVED); + break; + case kDeathTestInternalError: + FailFromInternalError(read_fd()); // Does not return. + break; + default: + GTEST_LOG_(FATAL) << "Death test child process reported " + << "unexpected status byte (" + << static_cast(flag) << ")"; + } + } else { + GTEST_LOG_(FATAL) << "Read from death test child process failed: " + << GetLastErrnoDescription(); + } + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd())); + set_read_fd(-1); +} + +// Signals that the death test code which should have exited, didn't. +// Should be called only in a death test child process. +// Writes a status byte to the child's status file descriptor, then +// calls _exit(1). +void DeathTestImpl::Abort(AbortReason reason) { + // The parent process considers the death test to be a failure if + // it finds any data in our pipe. So, here we write a single flag byte + // to the pipe, then exit. + const char status_ch = + reason == TEST_DID_NOT_DIE ? kDeathTestLived : + reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned; + + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); + // We are leaking the descriptor here because on some platforms (i.e., + // when built as Windows DLL), destructors of global objects will still + // run after calling _exit(). On such systems, write_fd_ will be + // indirectly closed from the destructor of UnitTestImpl, causing double + // close if it is also closed here. On debug configurations, double close + // may assert. 
As there are no in-process buffers to flush here, we are + // relying on the OS to close the descriptor after the process terminates + // when the destructors are not run. + _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash) +} + +// Returns an indented copy of stderr output for a death test. +// This makes distinguishing death test output lines from regular log lines +// much easier. +static ::std::string FormatDeathTestOutput(const ::std::string& output) { + ::std::string ret; + for (size_t at = 0; ; ) { + const size_t line_end = output.find('\n', at); + ret += "[ DEATH ] "; + if (line_end == ::std::string::npos) { + ret += output.substr(at); + break; + } + ret += output.substr(at, line_end + 1 - at); + at = line_end + 1; + } + return ret; +} + +// Assesses the success or failure of a death test, using both private +// members which have previously been set, and one argument: +// +// Private data members: +// outcome: An enumeration describing how the death test +// concluded: DIED, LIVED, THREW, or RETURNED. The death test +// fails in the latter three cases. +// status: The exit status of the child process. On *nix, it is in the +// in the format specified by wait(2). On Windows, this is the +// value supplied to the ExitProcess() API or a numeric code +// of the exception that terminated the program. +// regex: A regular expression object to be applied to +// the test's captured standard error output; the death test +// fails if it does not match. +// +// Argument: +// status_ok: true if exit_status is acceptable in the context of +// this particular death test, which fails if it is false +// +// Returns true iff all of the above conditions are met. Otherwise, the +// first failing condition, in the order given above, is the one that is +// reported. Also sets the last death test message string. +bool DeathTestImpl::Passed(bool status_ok) { + if (!spawned()) + return false; + + const String error_message = GetCapturedStderr(); + + bool success = false; + Message buffer; + + buffer << "Death test: " << statement() << "\n"; + switch (outcome()) { + case LIVED: + buffer << " Result: failed to die.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case THREW: + buffer << " Result: threw an exception.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case RETURNED: + buffer << " Result: illegal return in test statement.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case DIED: + if (status_ok) { + const bool matched = RE::PartialMatch(error_message.c_str(), *regex()); + if (matched) { + success = true; + } else { + buffer << " Result: died but not with expected error.\n" + << " Expected: " << regex()->pattern() << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + } else { + buffer << " Result: died but not with expected exit code:\n" + << " " << ExitSummary(status()) << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + break; + case IN_PROGRESS: + default: + GTEST_LOG_(FATAL) + << "DeathTest::Passed somehow called before conclusion of test"; + } + + DeathTest::set_last_death_test_message(buffer.GetString()); + return success; +} + +# if GTEST_OS_WINDOWS +// WindowsDeathTest implements death tests on Windows. 
Due to the +// specifics of starting new processes on Windows, death tests there are +// always threadsafe, and Google Test considers the +// --gtest_death_test_style=fast setting to be equivalent to +// --gtest_death_test_style=threadsafe there. +// +// A few implementation notes: Like the Linux version, the Windows +// implementation uses pipes for child-to-parent communication. But due to +// the specifics of pipes on Windows, some extra steps are required: +// +// 1. The parent creates a communication pipe and stores handles to both +// ends of it. +// 2. The parent starts the child and provides it with the information +// necessary to acquire the handle to the write end of the pipe. +// 3. The child acquires the write end of the pipe and signals the parent +// using a Windows event. +// 4. Now the parent can release the write end of the pipe on its side. If +// this is done before step 3, the object's reference count goes down to +// 0 and it is destroyed, preventing the child from acquiring it. The +// parent now has to release it, or read operations on the read end of +// the pipe will not return when the child terminates. +// 5. The parent reads child's output through the pipe (outcome code and +// any possible error messages) from the pipe, and its stderr and then +// determines whether to fail the test. +// +// Note: to distinguish Win32 API calls from the local method and function +// calls, the former are explicitly resolved in the global namespace. +// +class WindowsDeathTest : public DeathTestImpl { + public: + WindowsDeathTest(const char* a_statement, + const RE* a_regex, + const char* file, + int line) + : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + virtual TestRole AssumeRole(); + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; + // Handle to the write end of the pipe to the child process. + AutoHandle write_handle_; + // Child process handle. + AutoHandle child_handle_; + // Event the child process uses to signal the parent that it has + // acquired the handle to the write end of the pipe. After seeing this + // event the parent can release its own handles to make sure its + // ReadFile() calls return when the child terminates. + AutoHandle event_handle_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int WindowsDeathTest::Wait() { + if (!spawned()) + return 0; + + // Wait until the child either signals that it has acquired the write end + // of the pipe or it dies. + const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; + switch (::WaitForMultipleObjects(2, + wait_handles, + FALSE, // Waits for any of the handles. + INFINITE)) { + case WAIT_OBJECT_0: + case WAIT_OBJECT_0 + 1: + break; + default: + GTEST_DEATH_TEST_CHECK_(false); // Should not get here. + } + + // The child has acquired the write end of the pipe or exited. + // We release the handle on our side and continue. + write_handle_.Reset(); + event_handle_.Reset(); + + ReadAndInterpretStatusByte(); + + // Waits for the child process to exit if it haven't already. This + // returns immediately if the child has already exited, regardless of + // whether previous calls to WaitForMultipleObjects synchronized on this + // handle or not. 
+ GTEST_DEATH_TEST_CHECK_( + WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(), + INFINITE)); + DWORD status_code; + GTEST_DEATH_TEST_CHECK_( + ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE); + child_handle_.Reset(); + set_status(static_cast(status_code)); + return status(); +} + +// The AssumeRole process for a Windows death test. It creates a child +// process with the same executable as the current process to run the +// death test. The child process is given the --gtest_filter and +// --gtest_internal_run_death_test flags such that it knows to run the +// current death test only. +DeathTest::TestRole WindowsDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + // ParseInternalRunDeathTestFlag() has performed all the necessary + // processing. + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + // WindowsDeathTest uses an anonymous pipe to communicate results of + // a death test. + SECURITY_ATTRIBUTES handles_are_inheritable = { + sizeof(SECURITY_ATTRIBUTES), NULL, TRUE }; + HANDLE read_handle, write_handle; + GTEST_DEATH_TEST_CHECK_( + ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable, + 0) // Default buffer size. + != FALSE); + set_read_fd(::_open_osfhandle(reinterpret_cast(read_handle), + O_RDONLY)); + write_handle_.Reset(write_handle); + event_handle_.Reset(::CreateEvent( + &handles_are_inheritable, + TRUE, // The event will automatically reset to non-signaled state. + FALSE, // The initial state is non-signalled. + NULL)); // The even is unnamed. + GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL); + const String filter_flag = String::Format("--%s%s=%s.%s", + GTEST_FLAG_PREFIX_, kFilterFlag, + info->test_case_name(), + info->name()); + const String internal_flag = String::Format( + "--%s%s=%s|%d|%d|%u|%Iu|%Iu", + GTEST_FLAG_PREFIX_, + kInternalRunDeathTestFlag, + file_, line_, + death_test_index, + static_cast(::GetCurrentProcessId()), + // size_t has the same with as pointers on both 32-bit and 64-bit + // Windows platforms. + // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx. + reinterpret_cast(write_handle), + reinterpret_cast(event_handle_.Get())); + + char executable_path[_MAX_PATH + 1]; // NOLINT + GTEST_DEATH_TEST_CHECK_( + _MAX_PATH + 1 != ::GetModuleFileNameA(NULL, + executable_path, + _MAX_PATH)); + + String command_line = String::Format("%s %s \"%s\"", + ::GetCommandLineA(), + filter_flag.c_str(), + internal_flag.c_str()); + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // Flush the log buffers since the log streams are shared with the child. + FlushInfoLog(); + + // The child process will share the standard handles with the parent. + STARTUPINFOA startup_info; + memset(&startup_info, 0, sizeof(STARTUPINFO)); + startup_info.dwFlags = STARTF_USESTDHANDLES; + startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE); + startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE); + startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE); + + PROCESS_INFORMATION process_info; + GTEST_DEATH_TEST_CHECK_(::CreateProcessA( + executable_path, + const_cast(command_line.c_str()), + NULL, // Retuned process handle is not inheritable. + NULL, // Retuned thread handle is not inheritable. 
+ TRUE, // Child inherits all inheritable handles (for write_handle_). + 0x0, // Default creation flags. + NULL, // Inherit the parent's environment. + UnitTest::GetInstance()->original_working_dir(), + &startup_info, + &process_info) != FALSE); + child_handle_.Reset(process_info.hProcess); + ::CloseHandle(process_info.hThread); + set_spawned(true); + return OVERSEE_TEST; +} +# else // We are not on Windows. + +// ForkingDeathTest provides implementations for most of the abstract +// methods of the DeathTest interface. Only the AssumeRole method is +// left undefined. +class ForkingDeathTest : public DeathTestImpl { + public: + ForkingDeathTest(const char* statement, const RE* regex); + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + + protected: + void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; } + + private: + // PID of child process during death test; 0 in the child process itself. + pid_t child_pid_; +}; + +// Constructs a ForkingDeathTest. +ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex) + : DeathTestImpl(a_statement, a_regex), + child_pid_(-1) {} + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int ForkingDeathTest::Wait() { + if (!spawned()) + return 0; + + ReadAndInterpretStatusByte(); + + int status_value; + GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0)); + set_status(status_value); + return status_value; +} + +// A concrete death test class that forks, then immediately runs the test +// in the child process. +class NoExecDeathTest : public ForkingDeathTest { + public: + NoExecDeathTest(const char* a_statement, const RE* a_regex) : + ForkingDeathTest(a_statement, a_regex) { } + virtual TestRole AssumeRole(); +}; + +// The AssumeRole process for a fork-and-run death test. It implements a +// straightforward fork, with a simple pipe to transmit the status byte. +DeathTest::TestRole NoExecDeathTest::AssumeRole() { + const size_t thread_count = GetThreadCount(); + if (thread_count != 1) { + GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count); + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + + DeathTest::set_last_death_test_message(""); + CaptureStderr(); + // When we fork the process below, the log file buffers are copied, but the + // file descriptors are shared. We flush all log files here so that closing + // the file descriptors in the child process doesn't throw off the + // synchronization between descriptors and buffers in the parent process. + // This is as close to the fork as possible to avoid a race condition in case + // there are multiple threads running before the death test, and another + // thread writes to the log file. + FlushInfoLog(); + + const pid_t child_pid = fork(); + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + set_child_pid(child_pid); + if (child_pid == 0) { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0])); + set_write_fd(pipe_fd[1]); + // Redirects all logging to stderr in the child process to prevent + // concurrent writes to the log files. We capture stderr in the parent + // process and append the child process' output to a log. + LogToStderr(); + // Event forwarding to the listeners of event listener API mush be shut + // down in death test subprocesses. 
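The fork-based path here relies on the classic fork-and-pipe pattern: the child reports an outcome byte through the pipe and dies, and the parent reads that byte and then reaps the child with waitpid(). Below is a self-contained sketch of that pattern under POSIX assumptions; the status byte value is hypothetical and this is not the gtest code itself.

```cpp
// Stripped-down sketch of the fork-and-pipe pattern used by the "fast"
// death test style: child writes an outcome byte and exits, parent reads
// the byte and reaps the child.
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int pipe_fd[2];
  if (pipe(pipe_fd) == -1) return 1;

  const pid_t child = fork();
  if (child == 0) {                      // Child: report via the pipe, then die.
    close(pipe_fd[0]);
    const char status_byte = 'R';        // Hypothetical outcome code.
    (void)write(pipe_fd[1], &status_byte, 1);
    _exit(1);
  }

  close(pipe_fd[1]);                     // Parent: read the outcome, then wait.
  char outcome = '?';
  if (read(pipe_fd[0], &outcome, 1) != 1) outcome = '?';
  close(pipe_fd[0]);

  int status = 0;
  waitpid(child, &status, 0);
  printf("outcome=%c exited=%d code=%d\n", outcome,
         WIFEXITED(status), WIFEXITED(status) ? WEXITSTATUS(status) : -1);
  return 0;
}
```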
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding(); + return EXECUTE_TEST; + } else { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; + } +} + +// A concrete death test class that forks and re-executes the main +// program from the beginning, with command-line flags set that cause +// only this specific death test to be run. +class ExecDeathTest : public ForkingDeathTest { + public: + ExecDeathTest(const char* a_statement, const RE* a_regex, + const char* file, int line) : + ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { } + virtual TestRole AssumeRole(); + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; +}; + +// Utility class for accumulating command-line arguments. +class Arguments { + public: + Arguments() { + args_.push_back(NULL); + } + + ~Arguments() { + for (std::vector::iterator i = args_.begin(); i != args_.end(); + ++i) { + free(*i); + } + } + void AddArgument(const char* argument) { + args_.insert(args_.end() - 1, posix::StrDup(argument)); + } + + template + void AddArguments(const ::std::vector& arguments) { + for (typename ::std::vector::const_iterator i = arguments.begin(); + i != arguments.end(); + ++i) { + args_.insert(args_.end() - 1, posix::StrDup(i->c_str())); + } + } + char* const* Argv() { + return &args_[0]; + } + private: + std::vector args_; +}; + +// A struct that encompasses the arguments to the child process of a +// threadsafe-style death test process. +struct ExecDeathTestArgs { + char* const* argv; // Command-line arguments for the child's call to exec + int close_fd; // File descriptor to close; the read end of a pipe +}; + +# if GTEST_OS_MAC +inline char** GetEnviron() { + // When Google Test is built as a framework on MacOS X, the environ variable + // is unavailable. Apple's documentation (man environ) recommends using + // _NSGetEnviron() instead. + return *_NSGetEnviron(); +} +# else +// Some POSIX platforms expect you to declare environ. extern "C" makes +// it reside in the global namespace. +extern "C" char** environ; +inline char** GetEnviron() { return environ; } +# endif // GTEST_OS_MAC + +// The main function for a threadsafe-style death test child process. +// This function is called in a clone()-ed process and thus must avoid +// any potentially unsafe operations like malloc or libc functions. +static int ExecDeathTestChildMain(void* child_arg) { + ExecDeathTestArgs* const args = static_cast(child_arg); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd)); + + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(String::Format("chdir(\"%s\") failed: %s", + original_dir, + GetLastErrnoDescription().c_str())); + return EXIT_FAILURE; + } + + // We can safely call execve() as it's a direct system call. We + // cannot use execvp() as it's a libc function and thus potentially + // unsafe. Since execve() doesn't search the PATH, the user must + // invoke the test program via a valid path that contains at least + // one path separator. 
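The Arguments helper above keeps a trailing NULL element in its vector so that Argv() can be handed directly to exec, which expects a NULL-terminated argv array. A simplified sketch of the same idea (hypothetical class name, not the gtest type):

```cpp
// Sketch of the idea behind the Arguments helper above: keep a trailing
// NULL in the vector so the buffer can be passed straight to execve().
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

class ArgvBuilder {
 public:
  ArgvBuilder() { args_.push_back(NULL); }          // Keep the terminator last.
  ~ArgvBuilder() {
    for (size_t i = 0; i < args_.size(); ++i) free(args_[i]);
  }
  void Add(const std::string& arg) {
    args_.insert(args_.end() - 1, strdup(arg.c_str()));  // Insert before NULL.
  }
  char* const* Argv() { return &args_[0]; }          // NULL-terminated argv.
 private:
  std::vector<char*> args_;
};
```

Handing Argv() to execve() works because exec expects exactly this NULL-terminated layout.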
+ execve(args->argv[0], args->argv, GetEnviron()); + DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s", + args->argv[0], + original_dir, + GetLastErrnoDescription().c_str())); + return EXIT_FAILURE; +} + +// Two utility routines that together determine the direction the stack +// grows. +// This could be accomplished more elegantly by a single recursive +// function, but we want to guard against the unlikely possibility of +// a smart compiler optimizing the recursion away. +// +// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining +// StackLowerThanAddress into StackGrowsDown, which then doesn't give +// correct answer. +bool StackLowerThanAddress(const void* ptr) GTEST_NO_INLINE_; +bool StackLowerThanAddress(const void* ptr) { + int dummy; + return &dummy < ptr; +} + +bool StackGrowsDown() { + int dummy; + return StackLowerThanAddress(&dummy); +} + +// A threadsafe implementation of fork(2) for threadsafe-style death tests +// that uses clone(2). It dies with an error message if anything goes +// wrong. +static pid_t ExecDeathTestFork(char* const* argv, int close_fd) { + ExecDeathTestArgs args = { argv, close_fd }; + pid_t child_pid = -1; + +# if GTEST_HAS_CLONE + const bool use_fork = GTEST_FLAG(death_test_use_fork); + + if (!use_fork) { + static const bool stack_grows_down = StackGrowsDown(); + const size_t stack_size = getpagesize(); + // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead. + void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + void* const stack_top = + static_cast(stack) + (stack_grows_down ? stack_size : 0); + + child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args); + + GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); + } +# else + const bool use_fork = true; +# endif // GTEST_HAS_CLONE + + if (use_fork && (child_pid = fork()) == 0) { + ExecDeathTestChildMain(&args); + _exit(0); + } + + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + return child_pid; +} + +// The AssumeRole process for a fork-and-exec death test. It re-executes the +// main program from the beginning, setting the --gtest_filter +// and --gtest_internal_run_death_test flags to cause only the current +// death test to be re-run. 
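The two-function trick above exists only to keep the compiler from folding the address comparison into a constant. Restated compactly under the assumption of a GCC-compatible compiler (where gtest's GTEST_NO_INLINE_ expands to the noinline attribute), with a small driver added so it can be run on its own:

```cpp
// Sketch of the stack-direction check above; assumes GCC/Clang so that the
// noinline attribute keeps the two stack frames distinct.
#include <cstdio>

__attribute__((noinline))
static bool StackLowerThanAddress(const void* ptr) {
  int dummy = 0;
  return &dummy < ptr;   // Callee frame below caller frame => stack grows down.
}

static bool StackGrowsDown() {
  int dummy = 0;
  return StackLowerThanAddress(&dummy);
}

int main() {
  printf("stack grows %s\n", StackGrowsDown() ? "down" : "up");
  return 0;
}
```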
+DeathTest::TestRole ExecDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + // Clear the close-on-exec flag on the write end of the pipe, lest + // it be closed when the child process does an exec: + GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1); + + const String filter_flag = + String::Format("--%s%s=%s.%s", + GTEST_FLAG_PREFIX_, kFilterFlag, + info->test_case_name(), info->name()); + const String internal_flag = + String::Format("--%s%s=%s|%d|%d|%d", + GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag, + file_, line_, death_test_index, pipe_fd[1]); + Arguments args; + args.AddArguments(GetArgvs()); + args.AddArgument(filter_flag.c_str()); + args.AddArgument(internal_flag.c_str()); + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // See the comment in NoExecDeathTest::AssumeRole for why the next line + // is necessary. + FlushInfoLog(); + + const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_child_pid(child_pid); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; +} + +# endif // !GTEST_OS_WINDOWS + +// Creates a concrete DeathTest-derived class that depends on the +// --gtest_death_test_style flag, and sets the pointer pointed to +// by the "test" argument to its address. If the test should be +// skipped, sets that pointer to NULL. Returns true, unless the +// flag is set to an invalid value. +bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, + const char* file, int line, + DeathTest** test) { + UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const int death_test_index = impl->current_test_info() + ->increment_death_test_count(); + + if (flag != NULL) { + if (death_test_index > flag->index()) { + DeathTest::set_last_death_test_message(String::Format( + "Death test count (%d) somehow exceeded expected maximum (%d)", + death_test_index, flag->index())); + return false; + } + + if (!(flag->file() == file && flag->line() == line && + flag->index() == death_test_index)) { + *test = NULL; + return true; + } + } + +# if GTEST_OS_WINDOWS + + if (GTEST_FLAG(death_test_style) == "threadsafe" || + GTEST_FLAG(death_test_style) == "fast") { + *test = new WindowsDeathTest(statement, regex, file, line); + } + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") { + *test = new ExecDeathTest(statement, regex, file, line); + } else if (GTEST_FLAG(death_test_style) == "fast") { + *test = new NoExecDeathTest(statement, regex); + } + +# endif // GTEST_OS_WINDOWS + + else { // NOLINT - this is more readable than unbalanced brackets inside #if. + DeathTest::set_last_death_test_message(String::Format( + "Unknown death test style \"%s\" encountered", + GTEST_FLAG(death_test_style).c_str())); + return false; + } + + return true; +} + +// Splits a given string on a given delimiter, populating a given +// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have +// ::std::string, so we can use it here. 
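ExecDeathTest::AssumeRole above clears the close-on-exec flag on the pipe's write end so that the descriptor survives the exec of the re-run test binary. A small sketch of that flag manipulation (hypothetical helper name, POSIX assumed; this variant preserves any other FD flags rather than writing 0):

```cpp
// Sketch: make a file descriptor survive exec by clearing its close-on-exec
// flag, as done for the pipe's write end above.
#include <fcntl.h>

bool ClearCloseOnExec(int fd) {
  const int flags = fcntl(fd, F_GETFD);
  if (flags == -1) return false;
  return fcntl(fd, F_SETFD, flags & ~FD_CLOEXEC) != -1;
}
```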
+static void SplitString(const ::std::string& str, char delimiter, + ::std::vector< ::std::string>* dest) { + ::std::vector< ::std::string> parsed; + ::std::string::size_type pos = 0; + while (::testing::internal::AlwaysTrue()) { + const ::std::string::size_type colon = str.find(delimiter, pos); + if (colon == ::std::string::npos) { + parsed.push_back(str.substr(pos)); + break; + } else { + parsed.push_back(str.substr(pos, colon - pos)); + pos = colon + 1; + } + } + dest->swap(parsed); +} + +# if GTEST_OS_WINDOWS +// Recreates the pipe and event handles from the provided parameters, +// signals the event, and returns a file descriptor wrapped around the pipe +// handle. This function is called in the child process only. +int GetStatusFileDescriptor(unsigned int parent_process_id, + size_t write_handle_as_size_t, + size_t event_handle_as_size_t) { + AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE, + FALSE, // Non-inheritable. + parent_process_id)); + if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) { + DeathTestAbort(String::Format("Unable to open parent process %u", + parent_process_id)); + } + + // TODO(vladl@google.com): Replace the following check with a + // compile-time assertion when available. + GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t)); + + const HANDLE write_handle = + reinterpret_cast(write_handle_as_size_t); + HANDLE dup_write_handle; + + // The newly initialized handle is accessible only in in the parent + // process. To obtain one accessible within the child, we need to use + // DuplicateHandle. + if (!::DuplicateHandle(parent_process_handle.Get(), write_handle, + ::GetCurrentProcess(), &dup_write_handle, + 0x0, // Requested privileges ignored since + // DUPLICATE_SAME_ACCESS is used. + FALSE, // Request non-inheritable handler. + DUPLICATE_SAME_ACCESS)) { + DeathTestAbort(String::Format( + "Unable to duplicate the pipe handle %Iu from the parent process %u", + write_handle_as_size_t, parent_process_id)); + } + + const HANDLE event_handle = reinterpret_cast(event_handle_as_size_t); + HANDLE dup_event_handle; + + if (!::DuplicateHandle(parent_process_handle.Get(), event_handle, + ::GetCurrentProcess(), &dup_event_handle, + 0x0, + FALSE, + DUPLICATE_SAME_ACCESS)) { + DeathTestAbort(String::Format( + "Unable to duplicate the event handle %Iu from the parent process %u", + event_handle_as_size_t, parent_process_id)); + } + + const int write_fd = + ::_open_osfhandle(reinterpret_cast(dup_write_handle), O_APPEND); + if (write_fd == -1) { + DeathTestAbort(String::Format( + "Unable to convert pipe handle %Iu to a file descriptor", + write_handle_as_size_t)); + } + + // Signals the parent that the write end of the pipe has been acquired + // so the parent can release its own write end. + ::SetEvent(dup_event_handle); + + return write_fd; +} +# endif // GTEST_OS_WINDOWS + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { + if (GTEST_FLAG(internal_run_death_test) == "") return NULL; + + // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we + // can use it here. 
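SplitString above feeds the flag parsing that follows: the POSIX-style internal flag packs file, line, test index, and the pipe's write fd into one '|'-separated string. A standalone usage-style sketch of that splitting, with hypothetical field values:

```cpp
// Example (hypothetical values) of the '|'-splitting performed on the
// internal run-death-test flag value.
#include <iostream>
#include <string>
#include <vector>

static void Split(const std::string& str, char delim,
                  std::vector<std::string>* dest) {
  std::string::size_type pos = 0;
  for (;;) {
    const std::string::size_type next = str.find(delim, pos);
    if (next == std::string::npos) {
      dest->push_back(str.substr(pos));
      break;
    }
    dest->push_back(str.substr(pos, next - pos));
    pos = next + 1;
  }
}

int main() {
  std::vector<std::string> fields;
  Split("my_test.cc|42|1|5", '|', &fields);       // file|line|index|write_fd
  for (size_t i = 0; i < fields.size(); ++i)
    std::cout << i << ": " << fields[i] << "\n";  // 0: my_test.cc ... 3: 5
  return 0;
}
```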
+ int line = -1; + int index = -1; + ::std::vector< ::std::string> fields; + SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields); + int write_fd = -1; + +# if GTEST_OS_WINDOWS + + unsigned int parent_process_id = 0; + size_t write_handle_as_size_t = 0; + size_t event_handle_as_size_t = 0; + + if (fields.size() != 6 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &parent_process_id) + || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) + || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { + DeathTestAbort(String::Format( + "Bad --gtest_internal_run_death_test flag: %s", + GTEST_FLAG(internal_run_death_test).c_str())); + } + write_fd = GetStatusFileDescriptor(parent_process_id, + write_handle_as_size_t, + event_handle_as_size_t); +# else + + if (fields.size() != 4 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &write_fd)) { + DeathTestAbort(String::Format( + "Bad --gtest_internal_run_death_test flag: %s", + GTEST_FLAG(internal_run_death_test).c_str())); + } + +# endif // GTEST_OS_WINDOWS + + return new InternalRunDeathTestFlag(fields[0], line, index, write_fd); +} + +} // namespace internal + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: keith.ray@gmail.com (Keith Ray) + + +#include + +#if GTEST_OS_WINDOWS_MOBILE +# include +#elif GTEST_OS_WINDOWS +# include +# include +#elif GTEST_OS_SYMBIAN || GTEST_OS_NACL +// Symbian OpenC and NaCl have PATH_MAX in sys/syslimits.h +# include +#else +# include +# include // Some Linux distributions define PATH_MAX here. 
+#endif // GTEST_OS_WINDOWS_MOBILE + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_MAX_ _MAX_PATH +#elif defined(PATH_MAX) +# define GTEST_PATH_MAX_ PATH_MAX +#elif defined(_XOPEN_PATH_MAX) +# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX +#else +# define GTEST_PATH_MAX_ _POSIX_PATH_MAX +#endif // GTEST_OS_WINDOWS + + +namespace testing { +namespace internal { + +#if GTEST_OS_WINDOWS +// On Windows, '\\' is the standard path separator, but many tools and the +// Windows API also accept '/' as an alternate path separator. Unless otherwise +// noted, a file path can contain either kind of path separators, or a mixture +// of them. +const char kPathSeparator = '\\'; +const char kAlternatePathSeparator = '/'; +const char kPathSeparatorString[] = "\\"; +const char kAlternatePathSeparatorString[] = "/"; +# if GTEST_OS_WINDOWS_MOBILE +// Windows CE doesn't have a current directory. You should not use +// the current directory in tests on Windows CE, but this at least +// provides a reasonable fallback. +const char kCurrentDirectoryString[] = "\\"; +// Windows CE doesn't define INVALID_FILE_ATTRIBUTES +const DWORD kInvalidFileAttributes = 0xffffffff; +# else +const char kCurrentDirectoryString[] = ".\\"; +# endif // GTEST_OS_WINDOWS_MOBILE +#else +const char kPathSeparator = '/'; +const char kPathSeparatorString[] = "/"; +const char kCurrentDirectoryString[] = "./"; +#endif // GTEST_OS_WINDOWS + +// Returns whether the given character is a valid path separator. +static bool IsPathSeparator(char c) { +#if GTEST_HAS_ALT_PATH_SEP_ + return (c == kPathSeparator) || (c == kAlternatePathSeparator); +#else + return c == kPathSeparator; +#endif +} + +// Returns the current working directory, or "" if unsuccessful. +FilePath FilePath::GetCurrentDir() { +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE doesn't have a current directory, so we just return + // something reasonable. + return FilePath(kCurrentDirectoryString); +#elif GTEST_OS_WINDOWS + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#else + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns a copy of the FilePath with the case-insensitive extension removed. +// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns +// FilePath("dir/file"). If a case-insensitive extension is not +// found, returns a copy of the original FilePath. +FilePath FilePath::RemoveExtension(const char* extension) const { + String dot_extension(String::Format(".%s", extension)); + if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) { + return FilePath(String(pathname_.c_str(), pathname_.length() - 4)); + } + return *this; +} + +// Returns a pointer to the last occurence of a valid path separator in +// the FilePath. On Windows, for example, both '/' and '\' are valid path +// separators. Returns NULL if no path separator was found. +const char* FilePath::FindLastPathSeparator() const { + const char* const last_sep = strrchr(c_str(), kPathSeparator); +#if GTEST_HAS_ALT_PATH_SEP_ + const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator); + // Comparing two pointers of which only one is NULL is undefined. + if (last_alt_sep != NULL && + (last_sep == NULL || last_alt_sep > last_sep)) { + return last_alt_sep; + } +#endif + return last_sep; +} + +// Returns a copy of the FilePath with the directory part removed. 
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns +// FilePath("file"). If there is no directory part ("just_a_file"), it returns +// the FilePath unmodified. If there is no file part ("just_a_dir/") it +// returns an empty FilePath (""). +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveDirectoryName() const { + const char* const last_sep = FindLastPathSeparator(); + return last_sep ? FilePath(String(last_sep + 1)) : *this; +} + +// RemoveFileName returns the directory path with the filename removed. +// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". +// If the FilePath is "a_file" or "/a_file", RemoveFileName returns +// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does +// not have a file, like "just/a/dir/", it returns the FilePath unmodified. +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveFileName() const { + const char* const last_sep = FindLastPathSeparator(); + String dir; + if (last_sep) { + dir = String(c_str(), last_sep + 1 - c_str()); + } else { + dir = kCurrentDirectoryString; + } + return FilePath(dir); +} + +// Helper functions for naming files in a directory for xml output. + +// Given directory = "dir", base_name = "test", number = 0, +// extension = "xml", returns "dir/test.xml". If number is greater +// than zero (e.g., 12), returns "dir/test_12.xml". +// On Windows platform, uses \ as the separator rather than /. +FilePath FilePath::MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension) { + String file; + if (number == 0) { + file = String::Format("%s.%s", base_name.c_str(), extension); + } else { + file = String::Format("%s_%d.%s", base_name.c_str(), number, extension); + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. +FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator, + relative_path.c_str())); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. +bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? 
*this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + // TODO(wan@google.com): on Windows a network share like + // \\server\share can be a root directory, although it cannot be the + // current directory. Handle this properly. + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. +// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. +// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. +FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. +// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. 
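CreateDirectoriesRecursively above creates the parent path before the leaf and treats an already-existing directory as success. A compact standalone sketch of the same "mkdir -p" strategy, assuming a POSIX system and '/' separators only:

```cpp
// Standalone sketch (POSIX assumed) of recursive directory creation:
// create the parent first, then the leaf; "already exists" counts as success.
#include <sys/stat.h>
#include <sys/types.h>
#include <cerrno>
#include <string>

bool CreateDirsRecursively(const std::string& path) {
  if (path.empty()) return true;
  if (mkdir(path.c_str(), 0777) == 0 || errno == EEXIST) return true;
  const std::string::size_type sep = path.find_last_of('/');
  if (sep == std::string::npos) return false;          // No parent to create.
  return CreateDirsRecursively(path.substr(0, sep)) &&
         (mkdir(path.c_str(), 0777) == 0 || errno == EEXIST);
}
```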
+bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, NULL) ? 0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(String(pathname_.c_str(), pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". +// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share). +void FilePath::Normalize() { + if (pathname_.c_str() == NULL) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
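Normalize() above copies the pathname while skipping the remainder of each run of separators. The same collapsing behavior, written against std::string purely for illustration:

```cpp
// Sketch of the separator-collapsing behavior of Normalize() above:
// "bar///foo" becomes "bar/foo".
#include <cassert>
#include <string>

std::string CollapseSeparators(const std::string& path) {
  std::string out;
  for (std::string::size_type i = 0; i < path.size(); ++i) {
    out += path[i];
    if (path[i] == '/') {                 // Skip the rest of a separator run.
      while (i + 1 < path.size() && path[i + 1] == '/') ++i;
    }
  }
  return out;
}

int main() {
  assert(CollapseSeparators("bar///foo") == "bar/foo");
  assert(CollapseSeparators("/a//b/") == "/a/b/");
  return 0;
}
```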
+// +// Author: wan@google.com (Zhanyong Wan) + + +#include +#include +#include +#include + +#if GTEST_OS_WINDOWS_MOBILE +# include // For TerminateProcess() +#elif GTEST_OS_WINDOWS +# include +# include +#else +# include +#endif // GTEST_OS_WINDOWS_MOBILE + +#if GTEST_OS_MAC +# include +# include +# include +#endif // GTEST_OS_MAC + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { +namespace internal { + +#if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC and C++Builder do not provide a definition of STDERR_FILENO. +const int kStdOutFileno = 1; +const int kStdErrFileno = 2; +#else +const int kStdOutFileno = STDOUT_FILENO; +const int kStdErrFileno = STDERR_FILENO; +#endif // _MSC_VER + +#if GTEST_OS_MAC + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + const task_t task = mach_task_self(); + mach_msg_type_number_t thread_count; + thread_act_array_t thread_list; + const kern_return_t status = task_threads(task, &thread_list, &thread_count); + if (status == KERN_SUCCESS) { + // task_threads allocates resources in thread_list and we need to free them + // to avoid leaks. + vm_deallocate(task, + reinterpret_cast(thread_list), + sizeof(thread_t) * thread_count); + return static_cast(thread_count); + } else { + return 0; + } +} + +#else + +size_t GetThreadCount() { + // There's no portable way to detect the number of threads, so we just + // return 0 to indicate that we cannot detect it. + return 0; +} + +#endif // GTEST_OS_MAC + +#if GTEST_USES_POSIX_RE + +// Implements RE. Currently only needed for death tests. + +RE::~RE() { + if (is_valid_) { + // regfree'ing an invalid regex might crash because the content + // of the regex is undefined. Since the regex's are essentially + // the same, one cannot be valid (or invalid) without the other + // being so too. + regfree(&partial_regex_); + regfree(&full_regex_); + } + free(const_cast(pattern_)); +} + +// Returns true iff regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.full_regex_, str, 1, &match, 0) == 0; +} + +// Returns true iff regular expression re matches a substring of str +// (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.partial_regex_, str, 1, &match, 0) == 0; +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = posix::StrDup(regex); + + // Reserves enough bytes to hold the regular expression used for a + // full match. + const size_t full_regex_len = strlen(regex) + 10; + char* const full_pattern = new char[full_regex_len]; + + snprintf(full_pattern, full_regex_len, "^(%s)$", regex); + is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0; + // We want to call regcomp(&partial_regex_, ...) even if the + // previous expression returns false. Otherwise partial_regex_ may + // not be properly initialized can may cause trouble when it's + // freed. + // + // Some implementation of POSIX regex (e.g. 
on at least some + // versions of Cygwin) doesn't accept the empty string as a valid + // regex. We change it to an equivalent form "()" to be safe. + if (is_valid_) { + const char* const partial_regex = (*regex == '\0') ? "()" : regex; + is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0; + } + EXPECT_TRUE(is_valid_) + << "Regular expression \"" << regex + << "\" is not a valid POSIX Extended regular expression."; + + delete[] full_pattern; +} + +#elif GTEST_USES_SIMPLE_RE + +// Returns true iff ch appears anywhere in str (excluding the +// terminating '\0' character). +bool IsInSet(char ch, const char* str) { + return ch != '\0' && strchr(str, ch) != NULL; +} + +// Returns true iff ch belongs to the given classification. Unlike +// similar functions in , these aren't affected by the +// current locale. +bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; } +bool IsAsciiPunct(char ch) { + return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~"); +} +bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); } +bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); } +bool IsAsciiWordChar(char ch) { + return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || + ('0' <= ch && ch <= '9') || ch == '_'; +} + +// Returns true iff "\\c" is a supported escape sequence. +bool IsValidEscape(char c) { + return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW")); +} + +// Returns true iff the given atom (specified by escaped and pattern) +// matches ch. The result is undefined if the atom is invalid. +bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { + if (escaped) { // "\\p" where p is pattern_char. + switch (pattern_char) { + case 'd': return IsAsciiDigit(ch); + case 'D': return !IsAsciiDigit(ch); + case 'f': return ch == '\f'; + case 'n': return ch == '\n'; + case 'r': return ch == '\r'; + case 's': return IsAsciiWhiteSpace(ch); + case 'S': return !IsAsciiWhiteSpace(ch); + case 't': return ch == '\t'; + case 'v': return ch == '\v'; + case 'w': return IsAsciiWordChar(ch); + case 'W': return !IsAsciiWordChar(ch); + } + return IsAsciiPunct(pattern_char) && pattern_char == ch; + } + + return (pattern_char == '.' && ch != '\n') || pattern_char == ch; +} + +// Helper function used by ValidateRegex() to format error messages. +String FormatRegexSyntaxError(const char* regex, int index) { + return (Message() << "Syntax error at index " << index + << " in simple regular expression \"" << regex << "\": ").GetString(); +} + +// Generates non-fatal failures and returns false if regex is invalid; +// otherwise returns true. +bool ValidateRegex(const char* regex) { + if (regex == NULL) { + // TODO(wan@google.com): fix the source file location in the + // assertion failures to match where the regex is used in user + // code. + ADD_FAILURE() << "NULL is not a valid simple regular expression."; + return false; + } + + bool is_valid = true; + + // True iff ?, *, or + can follow the previous atom. + bool prev_repeatable = false; + for (int i = 0; regex[i]; i++) { + if (regex[i] == '\\') { // An escape sequence + i++; + if (regex[i] == '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "'\\' cannot appear at the end."; + return false; + } + + if (!IsValidEscape(regex[i])) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1) + << "invalid escape sequence \"\\" << regex[i] << "\"."; + is_valid = false; + } + prev_repeatable = true; + } else { // Not an escape sequence. 
+ const char ch = regex[i]; + + if (ch == '^' && i > 0) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'^' can only appear at the beginning."; + is_valid = false; + } else if (ch == '$' && regex[i + 1] != '\0') { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'$' can only appear at the end."; + is_valid = false; + } else if (IsInSet(ch, "()[]{}|")) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' is unsupported."; + is_valid = false; + } else if (IsRepeat(ch) && !prev_repeatable) { + ADD_FAILURE() << FormatRegexSyntaxError(regex, i) + << "'" << ch << "' can only follow a repeatable token."; + is_valid = false; + } + + prev_repeatable = !IsInSet(ch, "^$?*+"); + } + } + + return is_valid; +} + +// Matches a repeated regex atom followed by a valid simple regular +// expression. The regex atom is defined as c if escaped is false, +// or \c otherwise. repeat is the repetition meta character (?, *, +// or +). The behavior is undefined if str contains too many +// characters to be indexable by size_t, in which case the test will +// probably time out anyway. We are fine with this limitation as +// std::string has it too. +bool MatchRepetitionAndRegexAtHead( + bool escaped, char c, char repeat, const char* regex, + const char* str) { + const size_t min_count = (repeat == '+') ? 1 : 0; + const size_t max_count = (repeat == '?') ? 1 : + static_cast(-1) - 1; + // We cannot call numeric_limits::max() as it conflicts with the + // max() macro on Windows. + + for (size_t i = 0; i <= max_count; ++i) { + // We know that the atom matches each of the first i characters in str. + if (i >= min_count && MatchRegexAtHead(regex, str + i)) { + // We have enough matches at the head, and the tail matches too. + // Since we only care about *whether* the pattern matches str + // (as opposed to *how* it matches), there is no need to find a + // greedy match. + return true; + } + if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) + return false; + } + return false; +} + +// Returns true iff regex matches a prefix of str. regex must be a +// valid simple regular expression and not start with "^", or the +// result is undefined. +bool MatchRegexAtHead(const char* regex, const char* str) { + if (*regex == '\0') // An empty regex matches a prefix of anything. + return true; + + // "$" only matches the end of a string. Note that regex being + // valid guarantees that there's nothing after "$" in it. + if (*regex == '$') + return *str == '\0'; + + // Is the first thing in regex an escape sequence? + const bool escaped = *regex == '\\'; + if (escaped) + ++regex; + if (IsRepeat(regex[1])) { + // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so + // here's an indirect recursion. It terminates as the regex gets + // shorter in each recursion. + return MatchRepetitionAndRegexAtHead( + escaped, regex[0], regex[1], regex + 2, str); + } else { + // regex isn't empty, isn't "$", and doesn't start with a + // repetition. We match the first atom of regex with the first + // character of str and recurse. + return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) && + MatchRegexAtHead(regex + 1, str + 1); + } +} + +// Returns true iff regex matches any substring of str. regex must be +// a valid simple regular expression, or the result is undefined. +// +// The algorithm is recursive, but the recursion depth doesn't exceed +// the regex length, so we won't need to worry about running out of +// stack space normally. 
In rare cases the time complexity can be +// exponential with respect to the regex length + the string length, +// but usually it's must faster (often close to linear). +bool MatchRegexAnywhere(const char* regex, const char* str) { + if (regex == NULL || str == NULL) + return false; + + if (*regex == '^') + return MatchRegexAtHead(regex + 1, str); + + // A successful match can be anywhere in str. + do { + if (MatchRegexAtHead(regex, str)) + return true; + } while (*str++ != '\0'); + return false; +} + +// Implements the RE class. + +RE::~RE() { + free(const_cast(pattern_)); + free(const_cast(full_pattern_)); +} + +// Returns true iff regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str); +} + +// Returns true iff regular expression re matches a substring of str +// (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str); +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = full_pattern_ = NULL; + if (regex != NULL) { + pattern_ = posix::StrDup(regex); + } + + is_valid_ = ValidateRegex(regex); + if (!is_valid_) { + // No need to calculate the full pattern when the regex is invalid. + return; + } + + const size_t len = strlen(regex); + // Reserves enough bytes to hold the regular expression used for a + // full match: we need space to prepend a '^', append a '$', and + // terminate the string with '\0'. + char* buffer = static_cast(malloc(len + 3)); + full_pattern_ = buffer; + + if (*regex != '^') + *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'. + + // We don't use snprintf or strncpy, as they trigger a warning when + // compiled with VC++ 8.0. + memcpy(buffer, regex, len); + buffer += len; + + if (len == 0 || regex[len - 1] != '$') + *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'. + + *buffer = '\0'; +} + +#endif // GTEST_USES_POSIX_RE + +const char kUnknownFile[] = "unknown file"; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { + const char* const file_name = file == NULL ? kUnknownFile : file; + + if (line < 0) { + return String::Format("%s:", file_name).c_str(); + } +#ifdef _MSC_VER + return String::Format("%s(%d):", file_name, line).c_str(); +#else + return String::Format("%s:%d:", file_name, line).c_str(); +#endif // _MSC_VER +} + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +// Note that FormatCompilerIndependentFileLocation() does NOT append colon +// to the file location it produces, unlike FormatFileLocation(). +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( + const char* file, int line) { + const char* const file_name = file == NULL ? kUnknownFile : file; + + if (line < 0) + return file_name; + else + return String::Format("%s:%d", file_name, line).c_str(); +} + + +GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line) + : severity_(severity) { + const char* const marker = + severity == GTEST_INFO ? "[ INFO ]" : + severity == GTEST_WARNING ? "[WARNING]" : + severity == GTEST_ERROR ? 
"[ ERROR ]" : "[ FATAL ]"; + GetStream() << ::std::endl << marker << " " + << FormatFileLocation(file, line).c_str() << ": "; +} + +// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. +GTestLog::~GTestLog() { + GetStream() << ::std::endl; + if (severity_ == GTEST_FATAL) { + fflush(stderr); + posix::Abort(); + } +} +// Disable Microsoft deprecation warnings for POSIX functions called from +// this class (creat, dup, dup2, and close) +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable: 4996) +#endif // _MSC_VER + +#if GTEST_HAS_STREAM_REDIRECTION + +// Object that captures an output stream (stdout/stderr). +class CapturedStream { + public: + // The ctor redirects the stream to a temporary file. + CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { + +# if GTEST_OS_WINDOWS + char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT + char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT + + ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path); + const UINT success = ::GetTempFileNameA(temp_dir_path, + "gtest_redir", + 0, // Generate unique file name. + temp_file_path); + GTEST_CHECK_(success != 0) + << "Unable to create a temporary file in " << temp_dir_path; + const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE); + GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " + << temp_file_path; + filename_ = temp_file_path; +# else + // There's no guarantee that a test has write access to the + // current directory, so we create the temporary file in the /tmp + // directory instead. + char name_template[] = "/tmp/captured_stream.XXXXXX"; + const int captured_fd = mkstemp(name_template); + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(NULL); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + String GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(NULL); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + const String content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + // Reads the entire content of a file as a String. + static String ReadEntireFile(FILE* file); + + // Returns the size (in bytes) of a file. + static size_t GetFileSize(FILE* file); + + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. + ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +// Returns the size (in bytes) of a file. +size_t CapturedStream::GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +// Reads the entire content of a file as a string. +String CapturedStream::ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. 
+ do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const String content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +# ifdef _MSC_VER +# pragma warning(pop) +# endif // _MSC_VER + +static CapturedStream* g_captured_stderr = NULL; +static CapturedStream* g_captured_stdout = NULL; + +// Starts capturing an output stream (stdout/stderr). +void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { + if (*stream != NULL) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. +String GetCapturedStream(CapturedStream** captured_stream) { + const String content = (*captured_stream)->GetCapturedString(); + + delete *captured_stream; + *captured_stream = NULL; + + return content; +} + +// Starts capturing stdout. +void CaptureStdout() { + CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); +} + +// Starts capturing stderr. +void CaptureStderr() { + CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr); +} + +// Stops capturing stdout and returns the captured string. +String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); } + +// Stops capturing stderr and returns the captured string. +String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); } + +#endif // GTEST_HAS_STREAM_REDIRECTION + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). +::std::vector g_argvs; + +// Returns the command line as a vector of strings. +const ::std::vector& GetArgvs() { return g_argvs; } + +#endif // GTEST_HAS_DEATH_TEST + +#if GTEST_OS_WINDOWS_MOBILE +namespace posix { +void Abort() { + DebugBreak(); + TerminateProcess(GetCurrentProcess(), 1); +} +} // namespace posix +#endif // GTEST_OS_WINDOWS_MOBILE + +// Returns the name of the environment variable corresponding to the +// given flag. For example, FlagToEnvVar("foo") will return +// "GTEST_FOO" in the open-source version. +static String FlagToEnvVar(const char* flag) { + const String full_flag = + (Message() << GTEST_FLAG_PREFIX_ << flag).GetString(); + + Message env_var; + for (size_t i = 0; i != full_flag.length(); i++) { + env_var << ToUpper(full_flag.c_str()[i]); + } + + return env_var.GetString(); +} + +// Parses 'str' for a 32-bit signed integer. If successful, writes +// the result to *value and returns true; otherwise leaves *value +// unchanged and returns false. +bool ParseInt32(const Message& src_text, const char* str, Int32* value) { + // Parses the environment variable as a decimal integer. + char* end = NULL; + const long long_value = strtol(str, &end, 10); // NOLINT + + // Has strtol() consumed all characters in the string? + if (*end != '\0') { + // No - an invalid character was encountered. + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value \"" << str << "\".\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + // Is the parsed value in the range of an Int32? + const Int32 result = static_cast(long_value); + if (long_value == LONG_MAX || long_value == LONG_MIN || + // The parsed value overflows as a long. (strtol() returns + // LONG_MAX or LONG_MIN when the input overflows.) 
+ result != long_value + // The parsed value overflows as an Int32. + ) { + Message msg; + msg << "WARNING: " << src_text + << " is expected to be a 32-bit integer, but actually" + << " has value " << str << ", which overflows.\n"; + printf("%s", msg.GetString().c_str()); + fflush(stdout); + return false; + } + + *value = result; + return true; +} + +// Reads and returns the Boolean environment variable corresponding to +// the given flag; if it's not set, returns default_value. +// +// The value is considered true iff it's not "0". +bool BoolFromGTestEnv(const char* flag, bool default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + return string_value == NULL ? + default_value : strcmp(string_value, "0") != 0; +} + +// Reads and returns a 32-bit integer stored in the environment +// variable corresponding to the given flag; if it isn't set or +// doesn't represent a valid 32-bit integer, returns default_value. +Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + if (string_value == NULL) { + // The environment variable is not set. + return default_value; + } + + Int32 result = default_value; + if (!ParseInt32(Message() << "Environment variable " << env_var, + string_value, &result)) { + printf("The default value %s is used.\n", + (Message() << default_value).GetString().c_str()); + fflush(stdout); + return default_value; + } + + return result; +} + +// Reads and returns the string environment variable corresponding to +// the given flag; if it's not set, returns default_value. +const char* StringFromGTestEnv(const char* flag, const char* default_value) { + const String env_var = FlagToEnvVar(flag); + const char* const value = posix::GetEnv(env_var.c_str()); + return value == NULL ? default_value : value; +} + +} // namespace internal +} // namespace testing +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// It uses the << operator when possible, and prints the bytes in the +// object otherwise. A user can override its behavior for a class +// type Foo by defining either operator<<(::std::ostream&, const Foo&) +// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that +// defines Foo. + +#include +#include +#include // NOLINT +#include + +namespace testing { + +namespace { + +using ::std::ostream; + +#if GTEST_OS_WINDOWS_MOBILE // Windows CE does not define _snprintf_s. +# define snprintf _snprintf +#elif _MSC_VER >= 1400 // VC 8.0 and later deprecate snprintf and _snprintf. +# define snprintf _snprintf_s +#elif _MSC_VER +# define snprintf _snprintf +#endif // GTEST_OS_WINDOWS_MOBILE + +// Prints a segment of bytes in the given object. +void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, + size_t count, ostream* os) { + char text[5] = ""; + for (size_t i = 0; i != count; i++) { + const size_t j = start + i; + if (i != 0) { + // Organizes the bytes into groups of 2 for easy parsing by + // human. + if ((j % 2) == 0) + *os << ' '; + else + *os << '-'; + } + snprintf(text, sizeof(text), "%02X", obj_bytes[j]); + *os << text; + } +} + +// Prints the bytes in the given value to the given ostream. +void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count, + ostream* os) { + // Tells the user how big the object is. + *os << count << "-byte object <"; + + const size_t kThreshold = 132; + const size_t kChunkSize = 64; + // If the object size is bigger than kThreshold, we'll have to omit + // some details by printing only the first and the last kChunkSize + // bytes. + // TODO(wan): let the user control the threshold using a flag. + if (count < kThreshold) { + PrintByteSegmentInObjectTo(obj_bytes, 0, count, os); + } else { + PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os); + *os << " ... "; + // Rounds up to 2-byte boundary. + const size_t resume_pos = (count - kChunkSize + 1)/2*2; + PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os); + } + *os << ">"; +} + +} // namespace + +namespace internal2 { + +// Delegates to PrintBytesInObjectToImpl() to print the bytes in the +// given object. The delegation simplifies the implementation, which +// uses the << operator and thus is easier done outside of the +// ::testing::internal namespace, which contains a << operator that +// sometimes conflicts with the one in STL. 
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count, + ostream* os) { + PrintBytesInObjectToImpl(obj_bytes, count, os); +} + +} // namespace internal2 + +namespace internal { + +// Depending on the value of a char (or wchar_t), we print it in one +// of three formats: +// - as is if it's a printable ASCII (e.g. 'a', '2', ' '), +// - as a hexidecimal escape sequence (e.g. '\x7F'), or +// - as a special escape sequence (e.g. '\r', '\n'). +enum CharFormat { + kAsIs, + kHexEscape, + kSpecialEscape +}; + +// Returns true if c is a printable ASCII character. We test the +// value of c directly instead of calling isprint(), which is buggy on +// Windows Mobile. +inline bool IsPrintableAscii(wchar_t c) { + return 0x20 <= c && c <= 0x7E; +} + +// Prints a wide or narrow char c as a character literal without the +// quotes, escaping it when necessary; returns how c was formatted. +// The template argument UnsignedChar is the unsigned version of Char, +// which is the type of c. +template +static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) { + switch (static_cast(c)) { + case L'\0': + *os << "\\0"; + break; + case L'\'': + *os << "\\'"; + break; + case L'\\': + *os << "\\\\"; + break; + case L'\a': + *os << "\\a"; + break; + case L'\b': + *os << "\\b"; + break; + case L'\f': + *os << "\\f"; + break; + case L'\n': + *os << "\\n"; + break; + case L'\r': + *os << "\\r"; + break; + case L'\t': + *os << "\\t"; + break; + case L'\v': + *os << "\\v"; + break; + default: + if (IsPrintableAscii(c)) { + *os << static_cast(c); + return kAsIs; + } else { + *os << String::Format("\\x%X", static_cast(c)); + return kHexEscape; + } + } + return kSpecialEscape; +} + +// Prints a char c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) { + switch (c) { + case L'\'': + *os << "'"; + return kAsIs; + case L'"': + *os << "\\\""; + return kSpecialEscape; + default: + return PrintAsCharLiteralTo(c, os); + } +} + +// Prints a char c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) { + return PrintAsWideStringLiteralTo(static_cast(c), os); +} + +// Prints a wide or narrow character c and its code. '\0' is printed +// as "'\\0'", other unprintable characters are also properly escaped +// using the standard C++ escape sequence. The template argument +// UnsignedChar is the unsigned version of Char, which is the type of c. +template +void PrintCharAndCodeTo(Char c, ostream* os) { + // First, print c as a literal in the most readable form we can find. + *os << ((sizeof(c) > 1) ? "L'" : "'"); + const CharFormat format = PrintAsCharLiteralTo(c, os); + *os << "'"; + + // To aid user debugging, we also print c's code in decimal, unless + // it's 0 (in which case c was printed as '\\0', making the code + // obvious). + if (c == 0) + return; + *os << " (" << String::Format("%d", c).c_str(); + + // For more convenience, we print c's code again in hexidecimal, + // unless c was already printed in the form '\x##' or the code is in + // [1, 9]. + if (format == kHexEscape || (1 <= c && c <= 9)) { + // Do nothing. 
+ } else { + *os << String::Format(", 0x%X", + static_cast(c)).c_str(); + } + *os << ")"; +} + +void PrintTo(unsigned char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} +void PrintTo(signed char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} + +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its code. L'\0' is printed as "L'\\0'". +void PrintTo(wchar_t wc, ostream* os) { + PrintCharAndCodeTo(wc, os); +} + +// Prints the given array of characters to the ostream. +// The array starts at *begin, the length is len, it may include '\0' characters +// and may not be null-terminated. +static void PrintCharsAsStringTo(const char* begin, size_t len, ostream* os) { + *os << "\""; + bool is_previous_hex = false; + for (size_t index = 0; index < len; ++index) { + const char cur = begin[index]; + if (is_previous_hex && IsXDigit(cur)) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" \""; + } + is_previous_hex = PrintAsNarrowStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + PrintCharsAsStringTo(begin, len, os); +} + +// Prints the given array of wide characters to the ostream. +// The array starts at *begin, the length is len, it may include L'\0' +// characters and may not be null-terminated. +static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len, + ostream* os) { + *os << "L\""; + bool is_previous_hex = false; + for (size_t index = 0; index < len; ++index) { + const wchar_t cur = begin[index]; + if (is_previous_hex && isascii(cur) && IsXDigit(static_cast(cur))) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" L\""; + } + is_previous_hex = PrintAsWideStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. +void PrintTo(const wchar_t* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintWideCharsAsStringTo(s, wcslen(s), os); + } +} +#endif // wchar_t is native + +// Prints a ::string object. +#if GTEST_HAS_GLOBAL_STRING +void PrintStringTo(const ::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +void PrintStringTo(const ::std::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} + +// Prints a ::wstring object. 
+#if GTEST_HAS_GLOBAL_WSTRING +void PrintWideStringTo(const ::wstring& s, ostream* os) { + PrintWideCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +void PrintWideStringTo(const ::std::wstring& s, ostream* os) { + PrintWideCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_STD_WSTRING + +} // namespace internal + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// The Google C++ Testing Framework (Google Test) + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { + +using internal::GetUnitTestImpl; + +// Gets the summary of the failure message by omitting the stack trace +// in it. +internal::String TestPartResult::ExtractSummary(const char* message) { + const char* const stack_trace = strstr(message, internal::kStackTraceMarker); + return stack_trace == NULL ? internal::String(message) : + internal::String(message, stack_trace - message); +} + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { + return os + << result.file_name() << ":" << result.line_number() << ": " + << (result.type() == TestPartResult::kSuccess ? "Success" : + result.type() == TestPartResult::kFatalFailure ? "Fatal failure" : + "Non-fatal failure") << ":\n" + << result.message() << std::endl; +} + +// Appends a TestPartResult to the array. +void TestPartResultArray::Append(const TestPartResult& result) { + array_.push_back(result); +} + +// Returns the TestPartResult at the given index (0-based). 
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const { + if (index < 0 || index >= size()) { + printf("\nInvalid index (%d) into TestPartResultArray.\n", index); + internal::posix::Abort(); + } + + return array_[index]; +} + +// Returns the number of TestPartResult objects in the array. +int TestPartResultArray::size() const { + return static_cast(array_.size()); +} + +namespace internal { + +HasNewFatalFailureHelper::HasNewFatalFailureHelper() + : has_new_fatal_failure_(false), + original_reporter_(GetUnitTestImpl()-> + GetTestPartResultReporterForCurrentThread()) { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); +} + +HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread( + original_reporter_); +} + +void HasNewFatalFailureHelper::ReportTestPartResult( + const TestPartResult& result) { + if (result.fatally_failed()) + has_new_fatal_failure_ = true; + original_reporter_->ReportTestPartResult(result); +} + +} // namespace internal + +} // namespace testing +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + + +namespace testing { +namespace internal { + +#if GTEST_HAS_TYPED_TEST_P + +// Skips to the first non-space char in str. Returns an empty string if str +// contains only whitespace characters. +static const char* SkipSpaces(const char* str) { + while (IsSpace(*str)) + str++; + return str; +} + +// Verifies that registered_tests match the test names in +// defined_test_names_; returns registered_tests if successful, or +// aborts the program otherwise. +const char* TypedTestCasePState::VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests) { + typedef ::std::set::const_iterator DefinedTestIter; + registered_ = true; + + // Skip initial whitespace in registered_tests since some + // preprocessors prefix stringizied literals with whitespace. 
+ registered_tests = SkipSpaces(registered_tests); + + Message errors; + ::std::set tests; + for (const char* names = registered_tests; names != NULL; + names = SkipComma(names)) { + const String name = GetPrefixUntilComma(names); + if (tests.count(name) != 0) { + errors << "Test " << name << " is listed more than once.\n"; + continue; + } + + bool found = false; + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (name == *it) { + found = true; + break; + } + } + + if (found) { + tests.insert(name); + } else { + errors << "No test named " << name + << " can be found in this test case.\n"; + } + } + + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (tests.count(*it) == 0) { + errors << "You forgot to list test " << *it << ".\n"; + } + } + + const String& errors_str = errors.GetString(); + if (errors_str != "") { + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors_str.c_str()); + fflush(stderr); + posix::Abort(); + } + + return registered_tests; +} + +#endif // GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing diff --git a/modules/dnns_easily_fooled/caffe/src/gtest/gtest.h b/modules/dnns_easily_fooled/caffe/src/gtest/gtest.h new file mode 100644 index 000000000..3143bd679 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/gtest/gtest.h @@ -0,0 +1,19537 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! +// +// Acknowledgment: Google Test borrowed the idea of automatic test +// registration from Barthelemy Dagenais' (barthelemy@prologique.com) +// easyUnit framework. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_H_ + +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares functions and macros used internally by +// Google Test. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan) +// +// Low-level types and utilities for porting Google Test to various +// platforms. They are subject to change without notice. DO NOT USE +// THEM IN USER CODE. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// The user can define the following macros in the build script to +// control Google Test's behavior. If the user doesn't define a macro +// in this list, Google Test will define it. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::string, which is different to std::string). +// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::wstring, which is different to std::wstring). +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. +// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. +// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple +// is/isn't available. +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google +// Test's own tr1 tuple implementation should be +// used. Unused when the user sets +// GTEST_HAS_TR1_TUPLE to 0. +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. 
+ +// This header defines the following utilities: +// +// Macros indicating the current platform (defined to 1 if compiled on +// the given platform; otherwise undefined): +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_SYMBIAN - Symbian +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// Note that it is possible that none of the GTEST_OS_* macros are defined. +// +// Macros indicating available Google Test features (defined to 1 if +// the corresponding feature is supported; otherwise undefined): +// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized +// tests) +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_PARAM_TEST - value-parameterized tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above two are mutually exclusive. +// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ(). +// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above +// synchronization primitives have real implementations +// and Google Test is thread-safe; or 0 otherwise. +// +// Template meta programming: +// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only. +// IteratorTraits - partial implementation of std::iterator_traits, which +// is not available in libCstd when compiled with Sun C++. +// +// Smart pointers: +// scoped_ptr - as in TR2. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like +// platforms, or a reduced regular exception syntax on +// other platforms, including Windows. +// +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. +// GetCapturedStdout() - stops capturing stdout and returns the captured +// string. +// CaptureStderr() - starts capturing stderr. 
+// GetCapturedStderr() - stops capturing stderr and returns the captured +// string. +// +// Integer types: +// TypeWithSize - maps an integer to a int type. +// Int32, UInt32, Int64, UInt64, TimeInMillis +// - integers of known sizes. +// BiggestInt - the biggest signed integer type. +// +// Command-line utilities: +// GTEST_FLAG() - references a flag. +// GTEST_DECLARE_*() - declares a flag. +// GTEST_DEFINE_*() - defines a flag. +// GetArgvs() - returns the command line as a vector of strings. +// +// Environment variable utilities: +// GetEnv() - gets the value of an environment variable. +// BoolFromGTestEnv() - parses a bool environment variable. +// Int32FromGTestEnv() - parses an Int32 environment variable. +// StringFromGTestEnv() - parses a string environment variable. + +#include // for isspace, etc +#include // for ptrdiff_t +#include +#include +#include +#ifndef _WIN32_WCE +# include +# include +#endif // !_WIN32_WCE + +#include // NOLINT +#include // NOLINT +#include // NOLINT + +#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" +#define GTEST_FLAG_PREFIX_ "gtest_" +#define GTEST_FLAG_PREFIX_DASH_ "gtest-" +#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" +#define GTEST_NAME_ "Google Test" +#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/" + +// Determines the version of gcc that is used to compile this. +#ifdef __GNUC__ +// 40302 means version 4.3.2. +# define GTEST_GCC_VER_ \ + (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__) +#endif // __GNUC__ + +// Determines the platform on which Google Test is compiled. +#ifdef __CYGWIN__ +# define GTEST_OS_CYGWIN 1 +#elif defined __SYMBIAN32__ +# define GTEST_OS_SYMBIAN 1 +#elif defined _WIN32 +# define GTEST_OS_WINDOWS 1 +# ifdef _WIN32_WCE +# define GTEST_OS_WINDOWS_MOBILE 1 +# elif defined(__MINGW__) || defined(__MINGW32__) +# define GTEST_OS_WINDOWS_MINGW 1 +# else +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif // _WIN32_WCE +#elif defined __APPLE__ +# define GTEST_OS_MAC 1 +#elif defined __linux__ +# define GTEST_OS_LINUX 1 +# ifdef ANDROID +# define GTEST_OS_LINUX_ANDROID 1 +# endif // ANDROID +#elif defined __MVS__ +# define GTEST_OS_ZOS 1 +#elif defined(__sun) && defined(__SVR4) +# define GTEST_OS_SOLARIS 1 +#elif defined(_AIX) +# define GTEST_OS_AIX 1 +#elif defined(__hpux) +# define GTEST_OS_HPUX 1 +#elif defined __native_client__ +# define GTEST_OS_NACL 1 +#endif // __CYGWIN__ + +// Brings in definitions for functions used in the testing::internal::posix +// namespace (read, write, close, chdir, isatty, stat). We do not currently +// use them on Windows Mobile. +#if !GTEST_OS_WINDOWS +// This assumes that non-Windows OSes provide unistd.h. For OSes where this +// is not the case, we need to include headers that provide the functions +// mentioned above. +# include +# if !GTEST_OS_NACL +// TODO(vladl@google.com): Remove this condition when Native Client SDK adds +// strings.h (tracked in +// http://code.google.com/p/nativeclient/issues/detail?id=1175). +# include // Native Client doesn't provide strings.h. +# endif +#elif !GTEST_OS_WINDOWS_MOBILE +# include +# include +#endif + +// Defines this to true iff Google Test can use POSIX regular expressions. +#ifndef GTEST_HAS_POSIX_RE +# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS) +#endif + +#if GTEST_HAS_POSIX_RE + +// On some platforms, needs someone to define size_t, and +// won't compile otherwise. We can #include it here as we already +// included , which is guaranteed to define size_t through +// . 
+# include // NOLINT + +# define GTEST_USES_POSIX_RE 1 + +#elif GTEST_OS_WINDOWS + +// is not available on Windows. Use our own simple regex +// implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#else + +// may not be available on this platform. Use our own +// simple regex implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#endif // GTEST_HAS_POSIX_RE + +#ifndef GTEST_HAS_EXCEPTIONS +// The user didn't tell us whether exceptions are enabled, so we need +// to figure it out. +# if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS +// macro to enable exceptions, so we'll do the same. +// Assumes that exceptions are enabled by default. +# ifndef _HAS_EXCEPTIONS +# define _HAS_EXCEPTIONS 1 +# endif // _HAS_EXCEPTIONS +# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS +# elif defined(__GNUC__) && __EXCEPTIONS +// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__SUNPRO_CC) +// Sun Pro CC supports exceptions. However, there is no compile-time way of +// detecting whether they are enabled or not. Therefore, we assume that +// they are enabled unless the user tells us otherwise. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__IBMCPP__) && __EXCEPTIONS +// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__HP_aCC) +// Exception handling is in effect by default in HP aCC compiler. It has to +// be turned of by +noeh compiler option if desired. +# define GTEST_HAS_EXCEPTIONS 1 +# else +// For other compilers, we assume exceptions are disabled to be +// conservative. +# define GTEST_HAS_EXCEPTIONS 0 +# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#endif // GTEST_HAS_EXCEPTIONS + +#if !defined(GTEST_HAS_STD_STRING) +// Even though we don't use this macro any longer, we keep it in case +// some clients still depend on it. +# define GTEST_HAS_STD_STRING 1 +#elif !GTEST_HAS_STD_STRING +// The user told us that ::std::string isn't available. +# error "Google Test cannot be used where ::std::string isn't available." +#endif // !defined(GTEST_HAS_STD_STRING) + +#ifndef GTEST_HAS_GLOBAL_STRING +// The user didn't tell us whether ::string is available, so we need +// to figure it out. + +# define GTEST_HAS_GLOBAL_STRING 0 + +#endif // GTEST_HAS_GLOBAL_STRING + +#ifndef GTEST_HAS_STD_WSTRING +// The user didn't tell us whether ::std::wstring is available, so we need +// to figure it out. +// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring +// is available. + +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). +# define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS)) + +#endif // GTEST_HAS_STD_WSTRING + +#ifndef GTEST_HAS_GLOBAL_WSTRING +// The user didn't tell us whether ::wstring is available, so we need +// to figure it out. +# define GTEST_HAS_GLOBAL_WSTRING \ + (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING) +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Determines whether RTTI is available. +#ifndef GTEST_HAS_RTTI +// The user didn't tell us whether RTTI is enabled, so we need to +// figure it out. + +# ifdef _MSC_VER + +# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled. 
+# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled. +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302) + +# ifdef __GXX_RTTI +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif // __GXX_RTTI + +// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if +// both the typeid and dynamic_cast features are present. +# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) + +# ifdef __RTTI_ALL__ +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +# else + +// For all other compilers, we assume RTTI is enabled. +# define GTEST_HAS_RTTI 1 + +# endif // _MSC_VER + +#endif // GTEST_HAS_RTTI + +// It's this header's responsibility to #include when RTTI +// is enabled. +#if GTEST_HAS_RTTI +# include +#endif + +// Determines whether Google Test can use the pthreads library. +#ifndef GTEST_HAS_PTHREAD +// The user didn't tell us explicitly, so we assume pthreads support is +// available on Linux and Mac. +// +// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0 +// to your compiler flags. +# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX) +#endif // GTEST_HAS_PTHREAD + +#if GTEST_HAS_PTHREAD +// gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is +// true. +# include // NOLINT + +// For timespec and nanosleep, used below. +# include // NOLINT +#endif + +// Determines whether Google Test can use tr1/tuple. You can define +// this macro to 0 to prevent Google Test from using tuple (any +// feature depending on tuple with be disabled in this mode). +#ifndef GTEST_HAS_TR1_TUPLE +// The user didn't tell us not to do it, so we assume it's OK. +# define GTEST_HAS_TR1_TUPLE 1 +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether Google Test's own tr1 tuple implementation +// should be used. +#ifndef GTEST_USE_OWN_TR1_TUPLE +// The user didn't tell us, so we need to figure it out. + +// We use our own TR1 tuple if we aren't sure the user has an +// implementation of it already. At this time, GCC 4.0.0+ and MSVC +// 2010 are the only mainstream compilers that come with a TR1 tuple +// implementation. NVIDIA's CUDA NVCC compiler pretends to be GCC by +// defining __GNUC__ and friends, but cannot compile GCC's tuple +// implementation. MSVC 2008 (9.0) provides TR1 tuple in a 323 MB +// Feature Pack download, which we cannot assume the user has. +# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \ + || _MSC_VER >= 1600 +# define GTEST_USE_OWN_TR1_TUPLE 0 +# else +# define GTEST_USE_OWN_TR1_TUPLE 1 +# endif + +#endif // GTEST_USE_OWN_TR1_TUPLE + +// To avoid conditional compilation everywhere, we make it +// gtest-port.h's responsibility to #include the header implementing +// tr1/tuple. +#if GTEST_HAS_TR1_TUPLE + +# if GTEST_USE_OWN_TR1_TUPLE +// This file was GENERATED by a script. DO NOT EDIT BY HAND!!! + +// Copyright 2009 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Implements a subset of TR1 tuple needed by Google Test and Google Mock. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ + +#include // For ::std::pair. + +// The compiler used in Symbian has a bug that prevents us from declaring the +// tuple template as a friend (it complains that tuple is redefined). This +// hack bypasses the bug by declaring the members that should otherwise be +// private as public. +// Sun Studio versions < 12 also have the above bug. +#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590) +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public: +#else +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \ + template friend class tuple; \ + private: +#endif + +// GTEST_n_TUPLE_(T) is the type of an n-tuple. +#define GTEST_0_TUPLE_(T) tuple<> +#define GTEST_1_TUPLE_(T) tuple +#define GTEST_2_TUPLE_(T) tuple +#define GTEST_3_TUPLE_(T) tuple +#define GTEST_4_TUPLE_(T) tuple +#define GTEST_5_TUPLE_(T) tuple +#define GTEST_6_TUPLE_(T) tuple +#define GTEST_7_TUPLE_(T) tuple +#define GTEST_8_TUPLE_(T) tuple +#define GTEST_9_TUPLE_(T) tuple +#define GTEST_10_TUPLE_(T) tuple + +// GTEST_n_TYPENAMES_(T) declares a list of n typenames. +#define GTEST_0_TYPENAMES_(T) +#define GTEST_1_TYPENAMES_(T) typename T##0 +#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1 +#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2 +#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3 +#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4 +#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5 +#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6 +#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, typename T##7 +#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8 +#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8, typename T##9 + +// In theory, defining stuff in the ::std namespace is undefined +// behavior. 
We can do this as we are playing the role of a standard +// library vendor. +namespace std { +namespace tr1 { + +template +class tuple; + +// Anything in namespace gtest_internal is Google Test's INTERNAL +// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code. +namespace gtest_internal { + +// ByRef::type is T if T is a reference; otherwise it's const T&. +template +struct ByRef { typedef const T& type; }; // NOLINT +template +struct ByRef { typedef T& type; }; // NOLINT + +// A handy wrapper for ByRef. +#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef::type + +// AddRef::type is T if T is a reference; otherwise it's T&. This +// is the same as tr1::add_reference::type. +template +struct AddRef { typedef T& type; }; // NOLINT +template +struct AddRef { typedef T& type; }; // NOLINT + +// A handy wrapper for AddRef. +#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef::type + +// A helper for implementing get(). +template class Get; + +// A helper for implementing tuple_element. kIndexValid is true +// iff k < the number of fields in tuple type T. +template +struct TupleElement; + +template +struct TupleElement { typedef T0 type; }; + +template +struct TupleElement { typedef T1 type; }; + +template +struct TupleElement { typedef T2 type; }; + +template +struct TupleElement { typedef T3 type; }; + +template +struct TupleElement { typedef T4 type; }; + +template +struct TupleElement { typedef T5 type; }; + +template +struct TupleElement { typedef T6 type; }; + +template +struct TupleElement { typedef T7 type; }; + +template +struct TupleElement { typedef T8 type; }; + +template +struct TupleElement { typedef T9 type; }; + +} // namespace gtest_internal + +template <> +class tuple<> { + public: + tuple() {} + tuple(const tuple& /* t */) {} + tuple& operator=(const tuple& /* t */) { return *this; } +}; + +template +class GTEST_1_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {} + + tuple(const tuple& t) : f0_(t.f0_) {} + + template + tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_1_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) { + f0_ = t.f0_; + return *this; + } + + T0 f0_; +}; + +template +class GTEST_2_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0), + f1_(f1) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {} + + template + tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {} + template + tuple(const ::std::pair& p) : f0_(p.first), f1_(p.second) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_2_TUPLE_(U)& t) { + return CopyFrom(t); + } + template + tuple& operator=(const ::std::pair& p) { + f0_ = p.first; + f1_ = p.second; + return *this; + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + return *this; + } + + T0 f0_; + T1 f1_; +}; + +template +class GTEST_3_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {} + + 
tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + template + tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_3_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; +}; + +template +class GTEST_4_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {} + + template + tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_4_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; +}; + +template +class GTEST_5_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, + GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_) {} + + template + tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_5_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; +}; + +template +class GTEST_6_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_) {} + + template + tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_6_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; +}; + +template +class GTEST_7_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, 
GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + template + tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_7_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; +}; + +template +class GTEST_8_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, + GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + template + tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_8_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; +}; + +template +class GTEST_9_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7), f8_(f8) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + template + tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_9_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; +}; + +template +class tuple { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(), + f9_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + 
GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {} + + template + tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), + f9_(t.f9_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_10_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + f9_ = t.f9_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; + T9 f9_; +}; + +// 6.1.3.2 Tuple creation functions. + +// Known limitations: we don't support passing an +// std::tr1::reference_wrapper to make_tuple(). And we don't +// implement tie(). + +inline tuple<> make_tuple() { return tuple<>(); } + +template +inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) { + return GTEST_1_TUPLE_(T)(f0); +} + +template +inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) { + return GTEST_2_TUPLE_(T)(f0, f1); +} + +template +inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) { + return GTEST_3_TUPLE_(T)(f0, f1, f2); +} + +template +inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3) { + return GTEST_4_TUPLE_(T)(f0, f1, f2, f3); +} + +template +inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4) { + return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4); +} + +template +inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5) { + return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5); +} + +template +inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6) { + return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6); +} + +template +inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) { + return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7); +} + +template +inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8) { + return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8); +} + +template +inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8, const T9& f9) { + return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); +} + +// 6.1.3.3 Tuple helper classes. 
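+
+// Editorial usage note (not part of the original Google Test source): the
+// tr1::tuple emulation above is exercised like the standard facility.  A
+// minimal sketch, assuming this emulation is selected via
+// GTEST_USE_OWN_TR1_TUPLE:
+//
+//   ::std::tr1::tuple<int, bool> t = ::std::tr1::make_tuple(5, true);
+//   int  i = ::std::tr1::get<0>(t);   // 5
+//   bool b = ::std::tr1::get<1>(t);   // true
+//
+// make_tuple() is defined in 6.1.3.2 above, get<>() in 6.1.3.4 below, and
+// only operator== and operator!= are provided (6.1.3.5).
+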
+ +template struct tuple_size; + +template +struct tuple_size { static const int value = 0; }; + +template +struct tuple_size { static const int value = 1; }; + +template +struct tuple_size { static const int value = 2; }; + +template +struct tuple_size { static const int value = 3; }; + +template +struct tuple_size { static const int value = 4; }; + +template +struct tuple_size { static const int value = 5; }; + +template +struct tuple_size { static const int value = 6; }; + +template +struct tuple_size { static const int value = 7; }; + +template +struct tuple_size { static const int value = 8; }; + +template +struct tuple_size { static const int value = 9; }; + +template +struct tuple_size { static const int value = 10; }; + +template +struct tuple_element { + typedef typename gtest_internal::TupleElement< + k < (tuple_size::value), k, Tuple>::type type; +}; + +#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element::type + +// 6.1.3.4 Element access. + +namespace gtest_internal { + +template <> +class Get<0> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + Field(Tuple& t) { return t.f0_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + ConstField(const Tuple& t) { return t.f0_; } +}; + +template <> +class Get<1> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + Field(Tuple& t) { return t.f1_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + ConstField(const Tuple& t) { return t.f1_; } +}; + +template <> +class Get<2> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + Field(Tuple& t) { return t.f2_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + ConstField(const Tuple& t) { return t.f2_; } +}; + +template <> +class Get<3> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + Field(Tuple& t) { return t.f3_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + ConstField(const Tuple& t) { return t.f3_; } +}; + +template <> +class Get<4> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + Field(Tuple& t) { return t.f4_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + ConstField(const Tuple& t) { return t.f4_; } +}; + +template <> +class Get<5> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + Field(Tuple& t) { return t.f5_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + ConstField(const Tuple& t) { return t.f5_; } +}; + +template <> +class Get<6> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + Field(Tuple& t) { return t.f6_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + ConstField(const Tuple& t) { return t.f6_; } +}; + +template <> +class Get<7> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + Field(Tuple& t) { return t.f7_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + ConstField(const Tuple& t) { return t.f7_; } +}; + +template <> +class Get<8> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + Field(Tuple& t) { return t.f8_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + ConstField(const Tuple& t) { return t.f8_; } +}; + +template <> +class Get<9> { + public: + template + static 
GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + Field(Tuple& t) { return t.f9_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + ConstField(const Tuple& t) { return t.f9_; } +}; + +} // namespace gtest_internal + +template +GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::Field(t); +} + +template +GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(const GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::ConstField(t); +} + +// 6.1.3.5 Relational operators + +// We only implement == and !=, as we don't have a need for the rest yet. + +namespace gtest_internal { + +// SameSizeTuplePrefixComparator::Eq(t1, t2) returns true if the +// first k fields of t1 equals the first k fields of t2. +// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if +// k1 != k2. +template +struct SameSizeTuplePrefixComparator; + +template <> +struct SameSizeTuplePrefixComparator<0, 0> { + template + static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) { + return true; + } +}; + +template +struct SameSizeTuplePrefixComparator { + template + static bool Eq(const Tuple1& t1, const Tuple2& t2) { + return SameSizeTuplePrefixComparator::Eq(t1, t2) && + ::std::tr1::get(t1) == ::std::tr1::get(t2); + } +}; + +} // namespace gtest_internal + +template +inline bool operator==(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { + return gtest_internal::SameSizeTuplePrefixComparator< + tuple_size::value, + tuple_size::value>::Eq(t, u); +} + +template +inline bool operator!=(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { return !(t == u); } + +// 6.1.4 Pairs. +// Unimplemented. + +} // namespace tr1 +} // namespace std + +#undef GTEST_0_TUPLE_ +#undef GTEST_1_TUPLE_ +#undef GTEST_2_TUPLE_ +#undef GTEST_3_TUPLE_ +#undef GTEST_4_TUPLE_ +#undef GTEST_5_TUPLE_ +#undef GTEST_6_TUPLE_ +#undef GTEST_7_TUPLE_ +#undef GTEST_8_TUPLE_ +#undef GTEST_9_TUPLE_ +#undef GTEST_10_TUPLE_ + +#undef GTEST_0_TYPENAMES_ +#undef GTEST_1_TYPENAMES_ +#undef GTEST_2_TYPENAMES_ +#undef GTEST_3_TYPENAMES_ +#undef GTEST_4_TYPENAMES_ +#undef GTEST_5_TYPENAMES_ +#undef GTEST_6_TYPENAMES_ +#undef GTEST_7_TYPENAMES_ +#undef GTEST_8_TYPENAMES_ +#undef GTEST_9_TYPENAMES_ +#undef GTEST_10_TYPENAMES_ + +#undef GTEST_DECLARE_TUPLE_AS_FRIEND_ +#undef GTEST_BY_REF_ +#undef GTEST_ADD_REF_ +#undef GTEST_TUPLE_ELEMENT_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +# elif GTEST_OS_SYMBIAN + +// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to +// use STLport's tuple implementation, which unfortunately doesn't +// work as the copy of STLport distributed with Symbian is incomplete. +// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to +// use its own tuple implementation. +# ifdef BOOST_HAS_TR1_TUPLE +# undef BOOST_HAS_TR1_TUPLE +# endif // BOOST_HAS_TR1_TUPLE + +// This prevents , which defines +// BOOST_HAS_TR1_TUPLE, from being #included by Boost's . +# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED +# include + +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000) +// GCC 4.0+ implements tr1/tuple in the header. This does +// not conform to the TR1 spec, which requires the header to be . + +# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 +// Until version 4.3.2, gcc has a bug that causes , +// which is #included by , to not compile when RTTI is +// disabled. _TR1_FUNCTIONAL is the header guard for +// . 
Hence the following #define is a hack to prevent +// from being included. +# define _TR1_FUNCTIONAL 1 +# include +# undef _TR1_FUNCTIONAL // Allows the user to #include + // if he chooses to. +# else +# include // NOLINT +# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 + +# else +// If the compiler is not GCC 4.0+, we assume the user is using a +// spec-conforming TR1 implementation. +# include // NOLINT +# endif // GTEST_USE_OWN_TR1_TUPLE + +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether clone(2) is supported. +// Usually it will only be available on Linux, excluding +// Linux on the Itanium architecture. +// Also see http://linux.die.net/man/2/clone. +#ifndef GTEST_HAS_CLONE +// The user didn't tell us, so we need to figure it out. + +# if GTEST_OS_LINUX && !defined(__ia64__) +# define GTEST_HAS_CLONE 1 +# else +# define GTEST_HAS_CLONE 0 +# endif // GTEST_OS_LINUX && !defined(__ia64__) + +#endif // GTEST_HAS_CLONE + +// Determines whether to support stream redirection. This is used to test +// output correctness and to implement death tests. +#ifndef GTEST_HAS_STREAM_REDIRECTION +// By default, we assume that stream redirection is supported on all +// platforms except known mobile ones. +# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN +# define GTEST_HAS_STREAM_REDIRECTION 0 +# else +# define GTEST_HAS_STREAM_REDIRECTION 1 +# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN +#endif // GTEST_HAS_STREAM_REDIRECTION + +// Determines whether to support death tests. +// Google Test does not support death tests for VC 7.1 and earlier as +// abort() in a VC 7.1 application compiled as GUI in debug config +// pops up a dialog window that cannot be suppressed programmatically. +#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \ + (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \ + GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX) +# define GTEST_HAS_DEATH_TEST 1 +# include // NOLINT +#endif + +// We don't support MSVC 7.1 with exceptions disabled now. Therefore +// all the compilers we care about are adequate for supporting +// value-parameterized tests. +#define GTEST_HAS_PARAM_TEST 1 + +// Determines whether to support type-driven tests. + +// Typed tests need and variadic macros, which GCC, VC++ 8.0, +// Sun Pro CC, IBM Visual Age, and HP aCC support. +#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \ + defined(__IBMCPP__) || defined(__HP_aCC) +# define GTEST_HAS_TYPED_TEST 1 +# define GTEST_HAS_TYPED_TEST_P 1 +#endif + +// Determines whether to support Combine(). This only makes sense when +// value-parameterized tests are enabled. The implementation doesn't +// work on Sun Studio since it doesn't understand templated conversion +// operators. +#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC) +# define GTEST_HAS_COMBINE 1 +#endif + +// Determines whether the system compiler uses UTF-16 for encoding wide strings. +#define GTEST_WIDE_STRING_USES_UTF16_ \ + (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX) + +// Determines whether test results can be streamed to a socket. +#if GTEST_OS_LINUX +# define GTEST_CAN_STREAM_RESULTS_ 1 +#endif + +// Defines some utility macros. + +// The GNU compiler emits a warning if nested "if" statements are followed by +// an "else" statement and braces are not used to explicitly disambiguate the +// "else" binding. 
This leads to problems with code like: +// +// if (gate) +// ASSERT_*(condition) << "Some message"; +// +// The "switch (0) case 0:" idiom is used to suppress this. +#ifdef __INTEL_COMPILER +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#else +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#endif + +// Use this annotation at the end of a struct/class definition to +// prevent the compiler from optimizing away instances that are never +// used. This is useful when all interesting logic happens inside the +// c'tor and / or d'tor. Example: +// +// struct Foo { +// Foo() { ... } +// } GTEST_ATTRIBUTE_UNUSED_; +// +// Also use it after a variable or parameter declaration to tell the +// compiler the variable/parameter does not have to be used. +#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#else +# define GTEST_ATTRIBUTE_UNUSED_ +#endif + +// A macro to disallow operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_ASSIGN_(type)\ + void operator=(type const &) + +// A macro to disallow copy constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\ + type(type const &);\ + GTEST_DISALLOW_ASSIGN_(type) + +// Tell the compiler to warn about unused return values for functions declared +// with this macro. The macro should be used on function declarations +// following the argument list: +// +// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; +#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC) +# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#else +# define GTEST_MUST_USE_RESULT_ +#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC + +// Determine whether the compiler supports Microsoft's Structured Exception +// Handling. This is supported by several Windows compilers but generally +// does not exist on any other system. +#ifndef GTEST_HAS_SEH +// The user didn't tell us, so we need to figure it out. + +# if defined(_MSC_VER) || defined(__BORLANDC__) +// These two compilers are known to support SEH. +# define GTEST_HAS_SEH 1 +# else +// Assume no SEH. +# define GTEST_HAS_SEH 0 +# endif + +#endif // GTEST_HAS_SEH + +#ifdef _MSC_VER + +# if GTEST_LINKED_AS_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllimport) +# elif GTEST_CREATE_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllexport) +# endif + +#endif // _MSC_VER + +#ifndef GTEST_API_ +# define GTEST_API_ +#endif + +#ifdef __GNUC__ +// Ask the compiler to never inline a given function. +# define GTEST_NO_INLINE_ __attribute__((noinline)) +#else +# define GTEST_NO_INLINE_ +#endif + +namespace testing { + +class Message; + +namespace internal { + +class String; + +// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time +// expression is true. For example, you could use it to verify the +// size of a static array: +// +// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES, +// content_type_names_incorrect_size); +// +// or to make sure a struct is smaller than a certain size: +// +// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large); +// +// The second argument to the macro is the name of the variable. If +// the expression is false, most compilers will issue a warning/error +// containing the name of the variable. 
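+//
+// Editorial illustration (not part of the original comment): a failing
+// assertion such as
+//
+//   GTEST_COMPILE_ASSERT_(sizeof(int) >= 8, int_is_at_least_8_bytes);
+//
+// expands to a typedef of an array type with -1 elements, so on platforms
+// where int is 4 bytes the compiler rejects the typedef and the resulting
+// error message mentions "int_is_at_least_8_bytes".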
+ +template +struct CompileAssert { +}; + +#define GTEST_COMPILE_ASSERT_(expr, msg) \ + typedef ::testing::internal::CompileAssert<(bool(expr))> \ + msg[bool(expr) ? 1 : -1] + +// Implementation details of GTEST_COMPILE_ASSERT_: +// +// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1 +// elements (and thus is invalid) when the expression is false. +// +// - The simpler definition +// +// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1] +// +// does not work, as gcc supports variable-length arrays whose sizes +// are determined at run-time (this is gcc's extension and not part +// of the C++ standard). As a result, gcc fails to reject the +// following code with the simple definition: +// +// int foo; +// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is +// // not a compile-time constant. +// +// - By using the type CompileAssert<(bool(expr))>, we ensures that +// expr is a compile-time constant. (Template arguments must be +// determined at compile-time.) +// +// - The outter parentheses in CompileAssert<(bool(expr))> are necessary +// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written +// +// CompileAssert +// +// instead, these compilers will refuse to compile +// +// GTEST_COMPILE_ASSERT_(5 > 0, some_message); +// +// (They seem to think the ">" in "5 > 0" marks the end of the +// template argument list.) +// +// - The array size is (bool(expr) ? 1 : -1), instead of simply +// +// ((expr) ? 1 : -1). +// +// This is to avoid running into a bug in MS VC 7.1, which +// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1. + +// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h. +// +// This template is declared, but intentionally undefined. +template +struct StaticAssertTypeEqHelper; + +template +struct StaticAssertTypeEqHelper {}; + +#if GTEST_HAS_GLOBAL_STRING +typedef ::string string; +#else +typedef ::std::string string; +#endif // GTEST_HAS_GLOBAL_STRING + +#if GTEST_HAS_GLOBAL_WSTRING +typedef ::wstring wstring; +#elif GTEST_HAS_STD_WSTRING +typedef ::std::wstring wstring; +#endif // GTEST_HAS_GLOBAL_WSTRING + +// A helper for suppressing warnings on constant condition. It just +// returns 'condition'. +GTEST_API_ bool IsTrue(bool condition); + +// Defines scoped_ptr. + +// This implementation of scoped_ptr is PARTIAL - it only contains +// enough stuff to satisfy Google Test's need. +template +class scoped_ptr { + public: + typedef T element_type; + + explicit scoped_ptr(T* p = NULL) : ptr_(p) {} + ~scoped_ptr() { reset(); } + + T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + T* get() const { return ptr_; } + + T* release() { + T* const ptr = ptr_; + ptr_ = NULL; + return ptr; + } + + void reset(T* p = NULL) { + if (p != ptr_) { + if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type. + delete ptr_; + } + ptr_ = p; + } + } + private: + T* ptr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr); +}; + +// Defines RE. + +// A simple C++ wrapper for . It uses the POSIX Extended +// Regular Expression syntax. +class GTEST_API_ RE { + public: + // A copy constructor is required by the Standard to initialize object + // references from r-values. + RE(const RE& other) { Init(other.pattern()); } + + // Constructs an RE from a string. 
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT + +#if GTEST_HAS_GLOBAL_STRING + + RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT + +#endif // GTEST_HAS_GLOBAL_STRING + + RE(const char* regex) { Init(regex); } // NOLINT + ~RE(); + + // Returns the string representation of the regex. + const char* pattern() const { return pattern_; } + + // FullMatch(str, re) returns true iff regular expression re matches + // the entire str. + // PartialMatch(str, re) returns true iff regular expression re + // matches a substring of str (including str itself). + // + // TODO(wan@google.com): make FullMatch() and PartialMatch() work + // when str contains NUL characters. + static bool FullMatch(const ::std::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::std::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#if GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const ::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#endif // GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const char* str, const RE& re); + static bool PartialMatch(const char* str, const RE& re); + + private: + void Init(const char* regex); + + // We use a const char* instead of a string, as Google Test may be used + // where string is not available. We also do not use Google Test's own + // String type here, in order to simplify dependencies between the + // files. + const char* pattern_; + bool is_valid_; + +#if GTEST_USES_POSIX_RE + + regex_t full_regex_; // For FullMatch(). + regex_t partial_regex_; // For PartialMatch(). + +#else // GTEST_USES_SIMPLE_RE + + const char* full_pattern_; // For FullMatch(); + +#endif + + GTEST_DISALLOW_ASSIGN_(RE); +}; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line); + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line); + +// Defines logging utilities: +// GTEST_LOG_(severity) - logs messages at the specified severity level. The +// message itself is streamed into the macro. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. + +enum GTestLogSeverity { + GTEST_INFO, + GTEST_WARNING, + GTEST_ERROR, + GTEST_FATAL +}; + +// Formats log entry severity, provides a stream object for streaming the +// log message, and terminates the message with a newline when going out of +// scope. +class GTEST_API_ GTestLog { + public: + GTestLog(GTestLogSeverity severity, const char* file, int line); + + // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. + ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(NULL); } + +// INTERNAL IMPLEMENTATION - DO NOT USE. 
+// +// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition +// is not satisfied. +// Synopsys: +// GTEST_CHECK_(boolean_condition); +// or +// GTEST_CHECK_(boolean_condition) << "Additional message"; +// +// This checks the condition and if the condition is not satisfied +// it prints message about the condition violation, including the +// condition itself, plus additional message streamed into it, if any, +// and then it aborts the program. It aborts the program irrespective of +// whether it is built in the debug mode or not. +#define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. " + +// An all-mode assert to verify that the given POSIX-style function +// call returns 0 (indicating success). Known limitation: this +// doesn't expand to a balanced 'if' statement, so enclose the macro +// in {} if you need to use it as the only statement in an 'if' +// branch. +#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ + << gtest_error + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Use ImplicitCast_ as a safe version of static_cast for upcasting in +// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a +// const Foo*). When you use ImplicitCast_, the compiler checks that +// the cast is safe. Such explicit ImplicitCast_s are necessary in +// surprisingly many situations where C++ demands an exact type match +// instead of an argument type convertable to a target type. +// +// The syntax for using ImplicitCast_ is the same as for static_cast: +// +// ImplicitCast_(expr) +// +// ImplicitCast_ would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., implicit_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template +inline To ImplicitCast_(To x) { return x; } + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts +// always succeed. When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. +// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., down_cast). The internal +// namespace alone is not enough because the function can be found by ADL. 
+template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + if (false) { + const To to = NULL; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == NULL || dynamic_cast(f) != NULL); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. +// +GTEST_API_ void CaptureStdout(); +GTEST_API_ String GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ String GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION + + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). +extern ::std::vector g_argvs; + +// GTEST_HAS_DEATH_TEST implies we have ::std::string. +const ::std::vector& GetArgvs(); + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. + +#if GTEST_HAS_PTHREAD + +// Sleeps for (roughly) n milli-seconds. This function is only for +// testing Google Test's own constructs. Don't use it in user tests, +// either directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, NULL); +} + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class Notification { + public: + Notification() : notified_(false) {} + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { notified_ = true; } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + while(!notified_) { + SleepMilliseconds(10); + } + } + + private: + volatile bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. +// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. 
+// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). +extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return NULL; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. Do +// not use them in user tests, either directly or indirectly. +template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void (*UserThreadFunc)(T); + + ThreadWithParam( + UserThreadFunc func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0)); + finished_ = true; + } + } + + virtual void Run() { + if (thread_can_start_ != NULL) + thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + const UserThreadFunc func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true iff we know that the thread function has finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; + +// MutexBase and Mutex implement mutex on pthreads-based platforms. They +// are used in conjunction with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end +// // of the current scope. +// +// MutexBase implements behavior for both statically and dynamically +// allocated mutexes. Do not use MutexBase directly. Instead, write +// the following to define a static mutex: +// +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// +// You can forward declare a static mutex like this: +// +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// To create a dynamic mutex, just define an object of type Mutex. +class MutexBase { + public: + // Acquires this mutex. + void Lock() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_)); + owner_ = pthread_self(); + } + + // Releases this mutex. + void Unlock() { + // We don't protect writing to owner_ here, as it's the caller's + // responsibility to ensure that the current thread holds the + // mutex when this is called. + owner_ = 0; + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_)); + } + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. 
+ void AssertHeld() const { + GTEST_CHECK_(owner_ == pthread_self()) + << "The current thread is not holding the mutex @" << this; + } + + // A static mutex may be used before main() is entered. It may even + // be used before the dynamic initialization stage. Therefore we + // must be able to initialize a static mutex object at link time. + // This means MutexBase has to be a POD and its member variables + // have to be public. + public: + pthread_mutex_t mutex_; // The underlying pthread mutex. + pthread_t owner_; // The thread holding the mutex; 0 means no one holds it. +}; + +// Forward-declares a static mutex. +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex + +// Defines and statically (i.e. at link time) initializes a static mutex. +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 } + +// The Mutex class can only be used for mutexes created at runtime. It +// shares its API with MutexBase otherwise. +class Mutex : public MutexBase { + public: + Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + owner_ = 0; + } + ~Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +// We cannot name this class MutexLock as the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(MutexBase* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + MutexBase* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Helpers for ThreadLocal. + +// pthread_key_create() requires DeleteThreadLocalValue() to have +// C-linkage. Therefore it cannot be templatized to access +// ThreadLocal. Hence the need for class +// ThreadLocalValueHolderBase. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Called by pthread to delete thread-local data stored by +// pthread_setspecific(). +extern "C" inline void DeleteThreadLocalValue(void* value_holder) { + delete static_cast(value_holder); +} + +// Implements thread-local storage on pthreads-based systems. +// +// // Thread 1 +// ThreadLocal tl(100); // 100 is the default value for each thread. +// +// // Thread 2 +// tl.set(150); // Changes the value for thread 2 only. +// EXPECT_EQ(150, tl.get()); +// +// // Thread 1 +// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value. +// tl.set(200); +// EXPECT_EQ(200, tl.get()); +// +// The template type argument T must have a public copy constructor. +// In addition, the default ThreadLocal constructor requires T to have +// a public default constructor. +// +// An object managed for a thread by a ThreadLocal instance is deleted +// when the thread exits. Or, if the ThreadLocal instance dies in +// that thread, when the ThreadLocal dies. It's the user's +// responsibility to ensure that all other threads using a ThreadLocal +// have exited when it dies, or the per-thread objects for those +// threads will not be deleted. +// +// Google Test only uses global ThreadLocal objects. That means they +// will die after main() has returned. Therefore, no per-thread +// object managed by Google Test will be leaked as long as all threads +// using Google Test have exited when main() returns. 
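+//
+// Editorial aside before the ThreadLocal class below (not part of the
+// original comments; g_counter_mutex, g_counter and Increment are
+// hypothetical names): the mutex helpers defined above might be combined
+// like this.
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_counter_mutex);
+//   int g_counter = 0;
+//
+//   void Increment() {
+//     MutexLock lock(&g_counter_mutex);  // released at the end of the scope
+//     g_counter_mutex.AssertHeld();      // crashes (with high probability)
+//                                        // if the caller doesn't hold it
+//     ++g_counter;
+//   }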
+template +class ThreadLocal { + public: + ThreadLocal() : key_(CreateKey()), + default_() {} + explicit ThreadLocal(const T& value) : key_(CreateKey()), + default_(value) {} + + ~ThreadLocal() { + // Destroys the managed object for the current thread, if any. + DeleteThreadLocalValue(pthread_getspecific(key_)); + + // Releases resources associated with the key. This will *not* + // delete managed objects for other threads. + GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_)); + } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of type T. + class ValueHolder : public ThreadLocalValueHolderBase { + public: + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + static pthread_key_t CreateKey() { + pthread_key_t key; + // When a thread exits, DeleteThreadLocalValue() will be called on + // the object managed for that thread. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_key_create(&key, &DeleteThreadLocalValue)); + return key; + } + + T* GetOrCreateValue() const { + ThreadLocalValueHolderBase* const holder = + static_cast(pthread_getspecific(key_)); + if (holder != NULL) { + return CheckedDowncastToActualType(holder)->pointer(); + } + + ValueHolder* const new_holder = new ValueHolder(default_); + ThreadLocalValueHolderBase* const holder_base = new_holder; + GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base)); + return new_holder->pointer(); + } + + // A key pthreads uses for looking up per-thread values. + const pthread_key_t key_; + const T default_; // The default value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# define GTEST_IS_THREADSAFE 1 + +#else // GTEST_HAS_PTHREAD + +// A dummy implementation of synchronization primitives (mutex, lock, +// and thread-local variable). Necessary for compiling Google Test where +// mutex is not supported - using Google Test in multiple threads is not +// supported on such platforms. + +class Mutex { + public: + Mutex() {} + void AssertHeld() const {} +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex + +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex*) {} // NOLINT +}; + +typedef GTestMutexLock MutexLock; + +template +class ThreadLocal { + public: + ThreadLocal() : value_() {} + explicit ThreadLocal(const T& value) : value_(value) {} + T* pointer() { return &value_; } + const T* pointer() const { return &value_; } + const T& get() const { return value_; } + void set(const T& value) { value_ = value; } + private: + T value_; +}; + +// The above synchronization primitives have dummy implementations. +// Therefore Google Test is not thread-safe. +# define GTEST_IS_THREADSAFE 0 + +#endif // GTEST_HAS_PTHREAD + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +GTEST_API_ size_t GetThreadCount(); + +// Passing non-POD classes through ellipsis (...) crashes the ARM +// compiler and generates a warning in Sun Studio. The Nokia Symbian +// and the IBM XL C/C++ compiler try to instantiate a copy constructor +// for objects passed through ellipsis (...), failing for uncopyable +// objects. 
We define this to ensure that only POD is passed through +// ellipsis on these systems. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC) +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_ELLIPSIS_NEEDS_POD_ 1 +#else +# define GTEST_CAN_COMPARE_NULL 1 +#endif + +// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between +// const T& and const T* in a function template. These compilers +// _can_ decide between class template specializations for T and T*, +// so a tr1::type_traits-like is_pointer works. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) +# define GTEST_NEEDS_IS_POINTER_ 1 +#endif + +template +struct bool_constant { + typedef bool_constant type; + static const bool value = bool_value; +}; +template const bool bool_constant::value; + +typedef bool_constant false_type; +typedef bool_constant true_type; + +template +struct is_pointer : public false_type {}; + +template +struct is_pointer : public true_type {}; + +template +struct IteratorTraits { + typedef typename Iterator::value_type value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_SEP_ "\\" +# define GTEST_HAS_ALT_PATH_SEP_ 1 +// The biggest signed integer type the compiler supports. +typedef __int64 BiggestInt; +#else +# define GTEST_PATH_SEP_ "/" +# define GTEST_HAS_ALT_PATH_SEP_ 0 +typedef long long BiggestInt; // NOLINT +#endif // GTEST_OS_WINDOWS + +// Utilities for char. + +// isspace(int ch) and friends accept an unsigned char or EOF. char +// may be signed, depending on the compiler (or compiler flags). +// Therefore we need to cast a char to unsigned char before calling +// isspace(), etc. + +inline bool IsAlpha(char ch) { + return isalpha(static_cast(ch)) != 0; +} +inline bool IsAlNum(char ch) { + return isalnum(static_cast(ch)) != 0; +} +inline bool IsDigit(char ch) { + return isdigit(static_cast(ch)) != 0; +} +inline bool IsLower(char ch) { + return islower(static_cast(ch)) != 0; +} +inline bool IsSpace(char ch) { + return isspace(static_cast(ch)) != 0; +} +inline bool IsUpper(char ch) { + return isupper(static_cast(ch)) != 0; +} +inline bool IsXDigit(char ch) { + return isxdigit(static_cast(ch)) != 0; +} + +inline char ToLower(char ch) { + return static_cast(tolower(static_cast(ch))); +} +inline char ToUpper(char ch) { + return static_cast(toupper(static_cast(ch))); +} + +// The testing::internal::posix namespace holds wrappers for common +// POSIX functions. These wrappers hide the differences between +// Windows/MSVC and POSIX systems. Since some compilers define these +// standard functions as macros, the wrapper cannot have the same name +// as the wrapped function. + +namespace posix { + +// Functions with a different name on Windows. 
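+//
+// Editorial note (not part of the original comment; `value` is a
+// hypothetical const char*): call sites use these wrappers rather than the
+// raw names so they stay platform-neutral, e.g.
+//
+//   if (posix::StrCaseCmp(value, "yes") == 0) { ... }
+//
+// which resolves to _stricmp()/stricmp() on Windows compilers and to
+// strcasecmp() elsewhere, as defined below.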
+ +#if GTEST_OS_WINDOWS + +typedef struct _stat StatStruct; + +# ifdef __BORLANDC__ +inline int IsATTY(int fd) { return isatty(fd); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +# else // !__BORLANDC__ +# if GTEST_OS_WINDOWS_MOBILE +inline int IsATTY(int /* fd */) { return 0; } +# else +inline int IsATTY(int fd) { return _isatty(fd); } +# endif // GTEST_OS_WINDOWS_MOBILE +inline int StrCaseCmp(const char* s1, const char* s2) { + return _stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return _strdup(src); } +# endif // __BORLANDC__ + +# if GTEST_OS_WINDOWS_MOBILE +inline int FileNo(FILE* file) { return reinterpret_cast(_fileno(file)); } +// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this +// time and thus not defined there. +# else +inline int FileNo(FILE* file) { return _fileno(file); } +inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); } +inline int RmDir(const char* dir) { return _rmdir(dir); } +inline bool IsDir(const StatStruct& st) { + return (_S_IFDIR & st.st_mode) != 0; +} +# endif // GTEST_OS_WINDOWS_MOBILE + +#else + +typedef struct stat StatStruct; + +inline int FileNo(FILE* file) { return fileno(file); } +inline int IsATTY(int fd) { return isatty(fd); } +inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +inline int RmDir(const char* dir) { return rmdir(dir); } +inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); } + +#endif // GTEST_OS_WINDOWS + +// Functions deprecated by MSVC 8.0. + +#ifdef _MSC_VER +// Temporarily disable warning 4996 (deprecated function). +# pragma warning(push) +# pragma warning(disable:4996) +#endif + +inline const char* StrNCpy(char* dest, const char* src, size_t n) { + return strncpy(dest, src, n); +} + +// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and +// StrError() aren't needed on Windows CE at this time and thus not +// defined there. + +#if !GTEST_OS_WINDOWS_MOBILE +inline int ChDir(const char* dir) { return chdir(dir); } +#endif +inline FILE* FOpen(const char* path, const char* mode) { + return fopen(path, mode); +} +#if !GTEST_OS_WINDOWS_MOBILE +inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { + return freopen(path, mode, stream); +} +inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } +#endif +inline int FClose(FILE* fp) { return fclose(fp); } +#if !GTEST_OS_WINDOWS_MOBILE +inline int Read(int fd, void* buf, unsigned int count) { + return static_cast(read(fd, buf, count)); +} +inline int Write(int fd, const void* buf, unsigned int count) { + return static_cast(write(fd, buf, count)); +} +inline int Close(int fd) { return close(fd); } +inline const char* StrError(int errnum) { return strerror(errnum); } +#endif +inline const char* GetEnv(const char* name) { +#if GTEST_OS_WINDOWS_MOBILE + // We are on Windows CE, which has no environment variables. + return NULL; +#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) + // Environment variables which we programmatically clear will be set to the + // empty string rather than unset (NULL). Handle that case. + const char* const env = getenv(name); + return (env != NULL && env[0] != '\0') ? 
env : NULL; +#else + return getenv(name); +#endif +} + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + +#if GTEST_OS_WINDOWS_MOBILE +// Windows CE has no C library. The abort() function is used in +// several places in Google Test. This implementation provides a reasonable +// imitation of standard behaviour. +void Abort(); +#else +inline void Abort() { abort(); } +#endif // GTEST_OS_WINDOWS_MOBILE + +} // namespace posix + +// The maximum number a BiggestInt can represent. This definition +// works no matter BiggestInt is represented in one's complement or +// two's complement. +// +// We cannot rely on numeric_limits in STL, as __int64 and long long +// are not part of standard C++ and numeric_limits doesn't need to be +// defined for them. +const BiggestInt kMaxBiggestInt = + ~(static_cast(1) << (8*sizeof(BiggestInt) - 1)); + +// This template class serves as a compile-time function from size to +// type. It maps a size in bytes to a primitive type with that +// size. e.g. +// +// TypeWithSize<4>::UInt +// +// is typedef-ed to be unsigned int (unsigned integer made up of 4 +// bytes). +// +// Such functionality should belong to STL, but I cannot find it +// there. +// +// Google Test uses this class in the implementation of floating-point +// comparison. +// +// For now it only handles UInt (unsigned int) as that's all Google Test +// needs. Other types can be easily added in the future if need +// arises. +template +class TypeWithSize { + public: + // This prevents the user from using TypeWithSize with incorrect + // values of N. + typedef void UInt; +}; + +// The specialization for size 4. +template <> +class TypeWithSize<4> { + public: + // unsigned int has size 4 in both gcc and MSVC. + // + // As base/basictypes.h doesn't compile on Windows, we cannot use + // uint32, uint64, and etc here. + typedef int Int; + typedef unsigned int UInt; +}; + +// The specialization for size 8. +template <> +class TypeWithSize<8> { + public: + +#if GTEST_OS_WINDOWS + typedef __int64 Int; + typedef unsigned __int64 UInt; +#else + typedef long long Int; // NOLINT + typedef unsigned long long UInt; // NOLINT +#endif // GTEST_OS_WINDOWS +}; + +// Integer types of known sizes. +typedef TypeWithSize<4>::Int Int32; +typedef TypeWithSize<4>::UInt UInt32; +typedef TypeWithSize<8>::Int Int64; +typedef TypeWithSize<8>::UInt UInt64; +typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds. + +// Utilities for command line flags and environment variables. + +// Macro for referencing flags. +#define GTEST_FLAG(name) FLAGS_gtest_##name + +// Macros for declaring flags. +#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name) +#define GTEST_DECLARE_int32_(name) \ + GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name) +#define GTEST_DECLARE_string_(name) \ + GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name) + +// Macros for defining flags. +#define GTEST_DEFINE_bool_(name, default_val, doc) \ + GTEST_API_ bool GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_int32_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_string_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val) + +// Parses 'str' for a 32-bit signed integer. If successful, writes the result +// to *value and returns true; otherwise leaves *value unchanged and returns +// false. 
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing +// out of both gtest-port.cc and gtest.cc to avoid exporting this utility +// function. +bool ParseInt32(const Message& src_text, const char* str, Int32* value); + +// Parses a bool/Int32/string from the environment variable +// corresponding to the given Google Test flag. +bool BoolFromGTestEnv(const char* flag, bool default_val); +GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val); +const char* StringFromGTestEnv(const char* flag, const char* default_val); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +#if GTEST_OS_LINUX +# include +# include +# include +# include +#endif // GTEST_OS_LINUX + +#include +#include +#include +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares the String class and functions used internally by +// Google Test. They are subject to change without notice. They should not used +// by code external to Google Test. +// +// This header file is #included by . +// It should not be #included by other files. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ + +#ifdef __BORLANDC__ +// string.h is not guaranteed to provide strcpy on C++ Builder. +# include +#endif + +#include + +#include + +namespace testing { +namespace internal { + +// String - a UTF-8 string class. +// +// For historic reasons, we don't use std::string. +// +// TODO(wan@google.com): replace this class with std::string or +// implement it in terms of the latter. +// +// Note that String can represent both NULL and the empty string, +// while std::string cannot represent NULL. +// +// NULL and the empty string are considered different. 
NULL is less +// than anything (including the empty string) except itself. +// +// This class only provides minimum functionality necessary for +// implementing Google Test. We do not intend to implement a full-fledged +// string class here. +// +// Since the purpose of this class is to provide a substitute for +// std::string on platforms where it cannot be used, we define a copy +// constructor and assignment operators such that we don't need +// conditional compilation in a lot of places. +// +// In order to make the representation efficient, the d'tor of String +// is not virtual. Therefore DO NOT INHERIT FROM String. +class GTEST_API_ String { + public: + // Static utility methods + + // Returns the input enclosed in double quotes if it's not NULL; + // otherwise returns "(null)". For example, "\"Hello\"" is returned + // for input "Hello". + // + // This is useful for printing a C string in the syntax of a literal. + // + // Known issue: escape sequences are not handled yet. + static String ShowCStringQuoted(const char* c_str); + + // Clones a 0-terminated C string, allocating memory using new. The + // caller is responsible for deleting the return value using + // delete[]. Returns the cloned string, or NULL if the input is + // NULL. + // + // This is different from strdup() in string.h, which allocates + // memory using malloc(). + static const char* CloneCString(const char* c_str); + +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be + // able to pass strings to Win32 APIs on CE we need to convert them + // to 'Unicode', UTF-16. + + // Creates a UTF-16 wide string from the given ANSI string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the wide string, or NULL if the + // input is NULL. + // + // The wide string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static LPCWSTR AnsiToUtf16(const char* c_str); + + // Creates an ANSI string from the given wide string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the ANSI string, or NULL if the + // input is NULL. + // + // The returned string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static const char* Utf16ToAnsi(LPCWSTR utf16_str); +#endif + + // Compares two C strings. Returns true iff they have the same content. + // + // Unlike strcmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CStringEquals(const char* lhs, const char* rhs); + + // Converts a wide C string to a String using the UTF-8 encoding. + // NULL will be converted to "(null)". If an error occurred during + // the conversion, "(failed to convert from wide string)" is + // returned. + static String ShowWideCString(const wchar_t* wide_c_str); + + // Similar to ShowWideCString(), except that this function encloses + // the converted string in double quotes. + static String ShowWideCStringQuoted(const wchar_t* wide_c_str); + + // Compares two wide C strings. Returns true iff they have the same + // content. + // + // Unlike wcscmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. 
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); + + // Compares two C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike strcasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CaseInsensitiveCStringEquals(const char* lhs, + const char* rhs); + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Formats a list of arguments to a String, using the same format + // spec string as for printf. + // + // We do not use the StringPrintf class as it is not universally + // available. + // + // The result is limited to 4096 characters (including the tailing + // 0). If 4096 characters are not enough to format the input, + // "" is returned. + static String Format(const char* format, ...); + + // C'tors + + // The default c'tor constructs a NULL string. + String() : c_str_(NULL), length_(0) {} + + // Constructs a String by cloning a 0-terminated C string. + String(const char* a_c_str) { // NOLINT + if (a_c_str == NULL) { + c_str_ = NULL; + length_ = 0; + } else { + ConstructNonNull(a_c_str, strlen(a_c_str)); + } + } + + // Constructs a String by copying a given number of chars from a + // buffer. E.g. String("hello", 3) creates the string "hel", + // String("a\0bcd", 4) creates "a\0bc", String(NULL, 0) creates "", + // and String(NULL, 1) results in access violation. + String(const char* buffer, size_t a_length) { + ConstructNonNull(buffer, a_length); + } + + // The copy c'tor creates a new copy of the string. The two + // String objects do not share content. + String(const String& str) : c_str_(NULL), length_(0) { *this = str; } + + // D'tor. String is intended to be a final class, so the d'tor + // doesn't need to be virtual. + ~String() { delete[] c_str_; } + + // Allows a String to be implicitly converted to an ::std::string or + // ::string, and vice versa. Converting a String containing a NULL + // pointer to ::std::string or ::string is undefined behavior. + // Converting a ::std::string or ::string containing an embedded NUL + // character to a String will result in the prefix up to the first + // NUL character. + String(const ::std::string& str) { + ConstructNonNull(str.c_str(), str.length()); + } + + operator ::std::string() const { return ::std::string(c_str(), length()); } + +#if GTEST_HAS_GLOBAL_STRING + String(const ::string& str) { + ConstructNonNull(str.c_str(), str.length()); + } + + operator ::string() const { return ::string(c_str(), length()); } +#endif // GTEST_HAS_GLOBAL_STRING + + // Returns true iff this is an empty string (i.e. ""). + bool empty() const { return (c_str() != NULL) && (length() == 0); } + + // Compares this with another String. 
+ // Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0 + // if this is greater than rhs. + int Compare(const String& rhs) const; + + // Returns true iff this String equals the given C string. A NULL + // string and a non-NULL string are considered not equal. + bool operator==(const char* a_c_str) const { return Compare(a_c_str) == 0; } + + // Returns true iff this String is less than the given String. A + // NULL string is considered less than "". + bool operator<(const String& rhs) const { return Compare(rhs) < 0; } + + // Returns true iff this String doesn't equal the given C string. A NULL + // string and a non-NULL string are considered not equal. + bool operator!=(const char* a_c_str) const { return !(*this == a_c_str); } + + // Returns true iff this String ends with the given suffix. *Any* + // String is considered to end with a NULL or empty suffix. + bool EndsWith(const char* suffix) const; + + // Returns true iff this String ends with the given suffix, not considering + // case. Any String is considered to end with a NULL or empty suffix. + bool EndsWithCaseInsensitive(const char* suffix) const; + + // Returns the length of the encapsulated string, or 0 if the + // string is NULL. + size_t length() const { return length_; } + + // Gets the 0-terminated C string this String object represents. + // The String object still owns the string. Therefore the caller + // should NOT delete the return value. + const char* c_str() const { return c_str_; } + + // Assigns a C string to this object. Self-assignment works. + const String& operator=(const char* a_c_str) { + return *this = String(a_c_str); + } + + // Assigns a String object to this object. Self-assignment works. + const String& operator=(const String& rhs) { + if (this != &rhs) { + delete[] c_str_; + if (rhs.c_str() == NULL) { + c_str_ = NULL; + length_ = 0; + } else { + ConstructNonNull(rhs.c_str(), rhs.length()); + } + } + + return *this; + } + + private: + // Constructs a non-NULL String from the given content. This + // function can only be called when c_str_ has not been allocated. + // ConstructNonNull(NULL, 0) results in an empty string (""). + // ConstructNonNull(NULL, non_zero) is undefined behavior. + void ConstructNonNull(const char* buffer, size_t a_length) { + char* const str = new char[a_length + 1]; + memcpy(str, buffer, a_length); + str[a_length] = '\0'; + c_str_ = str; + length_ = a_length; + } + + const char* c_str_; + size_t length_; +}; // class String + +// Streams a String to an ostream. Each '\0' character in the String +// is replaced with "\\0". +inline ::std::ostream& operator<<(::std::ostream& os, const String& str) { + if (str.c_str() == NULL) { + os << "(null)"; + } else { + const char* const c_str = str.c_str(); + for (size_t i = 0; i != str.length(); i++) { + if (c_str[i] == '\0') { + os << "\\0"; + } else { + os << c_str[i]; + } + } + } + return os; +} + +// Gets the content of the stringstream's buffer as a String. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ String StringStreamToString(::std::stringstream* stream); + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". + +// Declared here but defined in gtest.h, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. 
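// Illustrative sketch (not Google Test code): the same "escape embedded NULs"
// idea as the String operator<< above, written against std::string. The name
// EscapeNuls is invented for this example.
#include <iostream>
#include <string>

std::string EscapeNuls(const std::string& s) {
  std::string out;
  for (std::string::size_type i = 0; i != s.length(); ++i) {
    if (s[i] == '\0') {
      out += "\\0";            // make the embedded NUL visible
    } else {
      out += s[i];
    }
  }
  return out;
}

int main() {
  const std::string s("a\0b", 3);           // a string with an embedded NUL
  std::cout << EscapeNuls(s) << "\n";       // prints: a\0b
  return 0;
}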
+template +String StreamableToString(const T& streamable); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in . +// Do not include this header file separately! + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. 
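// Illustrative sketch (not Google Test code): the "trailing separator means
// directory" convention described above, assuming '/' as the only separator.
// IsDirectoryName and EnsureTrailingSeparator are invented helper names.
#include <iostream>
#include <string>

bool IsDirectoryName(const std::string& path) {
  return !path.empty() && path[path.size() - 1] == '/';
}

std::string EnsureTrailingSeparator(const std::string& path) {
  return IsDirectoryName(path) ? path : path + "/";
}

int main() {
  std::cout << IsDirectoryName("like/this/") << "\n";          // 1: a directory
  std::cout << IsDirectoryName("like/this") << "\n";           // 0: a file
  std::cout << EnsureTrailingSeparator("like/this") << "\n";   // like/this/
  return 0;
}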
+ +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const char* pathname) : pathname_(pathname) { + Normalize(); + } + + explicit FilePath(const String& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + String ToString() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". + // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. + // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true iff the path is NULL or "". + bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. 
+ FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. + // For example, "bar///foo" becomes "bar/foo". Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. + // + // On Windows this method also replaces the alternate path separator '/' with + // the primary path separator '\\', so that for example "bar\\/\\foo" becomes + // "bar\\foo". + + void Normalize(); + + // Returns a pointer to the last occurence of a valid path separator in + // the FilePath. On Windows, for example, both '/' and '\' are valid path + // separators. Returns NULL if no path separator was found. + const char* FindLastPathSeparator() const; + + String pathname_; +}; // class FilePath + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +// This file was GENERATED by command: +// pump.py gtest-type-util.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Type utilities needed for implementing typed and type-parameterized +// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently we support at most 50 types in a list, and at most 50 +// type-parameterized tests in one type-parameterized test case. +// Please contact googletestframework@googlegroups.com if you need +// more. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + + +// #ifdef __GNUC__ is too general here. It is possible to use gcc without using +// libstdc++ (which is where cxxabi.h comes from). +# ifdef __GLIBCXX__ +# include +# elif defined(__HP_aCC) +# include +# endif // __GLIBCXX__ + +namespace testing { +namespace internal { + +// GetTypeName() returns a human-readable name of type T. +// NB: This function is also used in Google Mock, so don't move it inside of +// the typed-test-only section below. +template +String GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if defined(__GLIBCXX__) || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# ifdef __GLIBCXX__ + using abi::__cxa_demangle; +# endif // __GLIBCXX__ + char* const readable_name = __cxa_demangle(name, 0, 0, &status); + const String name_str(status == 0 ? readable_name : name); + free(readable_name); + return name_str; +# else + return name; +# endif // __GLIBCXX__ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// AssertyTypeEq::type is defined iff T1 and T2 are the same +// type. This can be used as a compile-time assertion to ensure that +// two types are equal. + +template +struct AssertTypeEq; + +template +struct AssertTypeEq { + typedef bool type; +}; + +// A unique type used as the default value for the arguments of class +// template Types. 
This allows us to simulate variadic templates +// (e.g. Types, Type, and etc), which C++ doesn't +// support directly. +struct None {}; + +// The following family of struct and struct templates are used to +// represent type lists. In particular, TypesN +// represents a type list with N types (T1, T2, ..., and TN) in it. +// Except for Types0, every struct in the family has two member types: +// Head for the first type in the list, and Tail for the rest of the +// list. + +// The empty type list. +struct Types0 {}; + +// Type lists of length 1, 2, 3, and so on. + +template +struct Types1 { + typedef T1 Head; + typedef Types0 Tail; +}; +template +struct Types2 { + typedef T1 Head; + typedef Types1 Tail; +}; + +template +struct Types3 { + typedef T1 Head; + typedef Types2 Tail; +}; + +template +struct Types4 { + typedef T1 Head; + typedef Types3 Tail; +}; + +template +struct Types5 { + typedef T1 Head; + typedef Types4 Tail; +}; + +template +struct Types6 { + typedef T1 Head; + typedef Types5 Tail; +}; + +template +struct Types7 { + typedef T1 Head; + typedef Types6 Tail; +}; + +template +struct Types8 { + typedef T1 Head; + typedef Types7 Tail; +}; + +template +struct Types9 { + typedef T1 Head; + typedef Types8 Tail; +}; + +template +struct Types10 { + typedef T1 Head; + typedef Types9 Tail; +}; + +template +struct Types11 { + typedef T1 Head; + typedef Types10 Tail; +}; + +template +struct Types12 { + typedef T1 Head; + typedef Types11 Tail; +}; + +template +struct Types13 { + typedef T1 Head; + typedef Types12 Tail; +}; + +template +struct Types14 { + typedef T1 Head; + typedef Types13 Tail; +}; + +template +struct Types15 { + typedef T1 Head; + typedef Types14 Tail; +}; + +template +struct Types16 { + typedef T1 Head; + typedef Types15 Tail; +}; + +template +struct Types17 { + typedef T1 Head; + typedef Types16 Tail; +}; + +template +struct Types18 { + typedef T1 Head; + typedef Types17 Tail; +}; + +template +struct Types19 { + typedef T1 Head; + typedef Types18 Tail; +}; + +template +struct Types20 { + typedef T1 Head; + typedef Types19 Tail; +}; + +template +struct Types21 { + typedef T1 Head; + typedef Types20 Tail; +}; + +template +struct Types22 { + typedef T1 Head; + typedef Types21 Tail; +}; + +template +struct Types23 { + typedef T1 Head; + typedef Types22 Tail; +}; + +template +struct Types24 { + typedef T1 Head; + typedef Types23 Tail; +}; + +template +struct Types25 { + typedef T1 Head; + typedef Types24 Tail; +}; + +template +struct Types26 { + typedef T1 Head; + typedef Types25 Tail; +}; + +template +struct Types27 { + typedef T1 Head; + typedef Types26 Tail; +}; + +template +struct Types28 { + typedef T1 Head; + typedef Types27 Tail; +}; + +template +struct Types29 { + typedef T1 Head; + typedef Types28 Tail; +}; + +template +struct Types30 { + typedef T1 Head; + typedef Types29 Tail; +}; + +template +struct Types31 { + typedef T1 Head; + typedef Types30 Tail; +}; + +template +struct Types32 { + typedef T1 Head; + typedef Types31 Tail; +}; + +template +struct Types33 { + typedef T1 Head; + typedef Types32 Tail; +}; + +template +struct Types34 { + typedef T1 Head; + typedef Types33 Tail; +}; + +template +struct Types35 { + typedef T1 Head; + typedef Types34 Tail; +}; + +template +struct Types36 { + typedef T1 Head; + typedef Types35 Tail; +}; + +template +struct Types37 { + typedef T1 Head; + typedef Types36 Tail; +}; + +template +struct Types38 { + typedef T1 Head; + typedef Types37 Tail; +}; + +template +struct Types39 { + typedef T1 Head; + typedef Types38 Tail; +}; + 
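// Illustrative sketch (not Google Test code): demangling typeid(T).name() with
// abi::__cxa_demangle, as GetTypeName() above does. Assumes a toolchain that
// provides <cxxabi.h> (e.g. g++/clang with libstdc++); on failure the raw,
// mangled name is kept.
#include <cxxabi.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <typeinfo>

template <typename T>
std::string DemangledName() {
  const char* const mangled = typeid(T).name();
  int status = 0;
  char* const readable = abi::__cxa_demangle(mangled, 0, 0, &status);
  const std::string result(status == 0 ? readable : mangled);
  std::free(readable);
  return result;
}

struct MyFixture {};

int main() {
  std::cout << DemangledName<MyFixture>() << "\n";  // e.g. "MyFixture"
  std::cout << DemangledName<int>() << "\n";        // e.g. "int"
  return 0;
}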
+template +struct Types40 { + typedef T1 Head; + typedef Types39 Tail; +}; + +template +struct Types41 { + typedef T1 Head; + typedef Types40 Tail; +}; + +template +struct Types42 { + typedef T1 Head; + typedef Types41 Tail; +}; + +template +struct Types43 { + typedef T1 Head; + typedef Types42 Tail; +}; + +template +struct Types44 { + typedef T1 Head; + typedef Types43 Tail; +}; + +template +struct Types45 { + typedef T1 Head; + typedef Types44 Tail; +}; + +template +struct Types46 { + typedef T1 Head; + typedef Types45 Tail; +}; + +template +struct Types47 { + typedef T1 Head; + typedef Types46 Tail; +}; + +template +struct Types48 { + typedef T1 Head; + typedef Types47 Tail; +}; + +template +struct Types49 { + typedef T1 Head; + typedef Types48 Tail; +}; + +template +struct Types50 { + typedef T1 Head; + typedef Types49 Tail; +}; + + +} // namespace internal + +// We don't want to require the users to write TypesN<...> directly, +// as that would require them to count the length. Types<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Types +// will appear as Types in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Types, and Google Test will translate +// that to TypesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Types template. +template +struct Types { + typedef internal::Types50 type; +}; + +template <> +struct Types { + typedef internal::Types0 type; +}; +template +struct Types { + typedef internal::Types1 type; +}; +template +struct Types { + typedef internal::Types2 type; +}; +template +struct Types { + typedef internal::Types3 type; +}; +template +struct Types { + typedef internal::Types4 type; +}; +template +struct Types { + typedef internal::Types5 type; +}; +template +struct Types { + typedef internal::Types6 type; +}; +template +struct Types { + typedef internal::Types7 type; +}; +template +struct Types { + typedef internal::Types8 type; +}; +template +struct Types { + typedef internal::Types9 type; +}; +template +struct Types { + typedef internal::Types10 type; +}; +template +struct Types { + typedef internal::Types11 type; +}; +template +struct Types { + typedef internal::Types12 type; +}; +template +struct Types { + typedef internal::Types13 type; +}; +template +struct Types { + typedef internal::Types14 type; +}; +template +struct Types { + typedef internal::Types15 type; +}; +template +struct Types { + typedef internal::Types16 type; +}; +template +struct Types { + typedef internal::Types17 type; +}; +template +struct Types { + typedef internal::Types18 type; +}; +template +struct Types { + typedef internal::Types19 type; +}; +template +struct Types { + typedef internal::Types20 type; +}; +template +struct Types { + typedef internal::Types21 type; +}; +template +struct Types { + typedef internal::Types22 type; +}; +template +struct Types { + typedef internal::Types23 type; +}; +template +struct Types { + typedef internal::Types24 type; +}; +template +struct Types { + typedef internal::Types25 type; +}; +template +struct Types { + typedef internal::Types26 type; +}; +template +struct Types { + typedef internal::Types27 type; +}; +template +struct Types { + typedef internal::Types28 type; +}; +template +struct Types { + typedef internal::Types29 type; +}; +template +struct Types { + 
typedef internal::Types30 type; +}; +template +struct Types { + typedef internal::Types31 type; +}; +template +struct Types { + typedef internal::Types32 type; +}; +template +struct Types { + typedef internal::Types33 type; +}; +template +struct Types { + typedef internal::Types34 type; +}; +template +struct Types { + typedef internal::Types35 type; +}; +template +struct Types { + typedef internal::Types36 type; +}; +template +struct Types { + typedef internal::Types37 type; +}; +template +struct Types { + typedef internal::Types38 type; +}; +template +struct Types { + typedef internal::Types39 type; +}; +template +struct Types { + typedef internal::Types40 type; +}; +template +struct Types { + typedef internal::Types41 type; +}; +template +struct Types { + typedef internal::Types42 type; +}; +template +struct Types { + typedef internal::Types43 type; +}; +template +struct Types { + typedef internal::Types44 type; +}; +template +struct Types { + typedef internal::Types45 type; +}; +template +struct Types { + typedef internal::Types46 type; +}; +template +struct Types { + typedef internal::Types47 type; +}; +template +struct Types { + typedef internal::Types48 type; +}; +template +struct Types { + typedef internal::Types49 type; +}; + +namespace internal { + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. +template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +// A unique struct template used as the default value for the +// arguments of class template Templates. This allows us to simulate +// variadic templates (e.g. Templates, Templates, +// and etc), which C++ doesn't support directly. +template +struct NoneT {}; + +// The following family of struct and struct templates are used to +// represent template lists. In particular, TemplatesN represents a list of N templates (T1, T2, ..., and TN). Except +// for Templates0, every struct in the family has two member types: +// Head for the selector of the first template in the list, and Tail +// for the rest of the list. + +// The empty template list. +struct Templates0 {}; + +// Template lists of length 1, 2, 3, and so on. 
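// Illustrative sketch (not Google Test code): the TemplateSel idea above --
// wrapping a one-parameter class template in an ordinary type so it can be
// passed around and instantiated later through a nested Bind member. The
// names Selector and Box are invented for this demo.
#include <iostream>

template <template <typename> class Tmpl>
struct Selector {
  template <typename T>
  struct Bind {
    typedef Tmpl<T> type;
  };
};

template <typename T>
struct Box { T value; };

int main() {
  // "Select" the Box template now, instantiate it with int later.
  typedef Selector<Box> BoxSel;
  BoxSel::Bind<int>::type b;   // same as Box<int>
  b.value = 42;
  std::cout << b.value << "\n";
  return 0;
}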
+ +template +struct Templates1 { + typedef TemplateSel Head; + typedef Templates0 Tail; +}; +template +struct Templates2 { + typedef TemplateSel Head; + typedef Templates1 Tail; +}; + +template +struct Templates3 { + typedef TemplateSel Head; + typedef Templates2 Tail; +}; + +template +struct Templates4 { + typedef TemplateSel Head; + typedef Templates3 Tail; +}; + +template +struct Templates5 { + typedef TemplateSel Head; + typedef Templates4 Tail; +}; + +template +struct Templates6 { + typedef TemplateSel Head; + typedef Templates5 Tail; +}; + +template +struct Templates7 { + typedef TemplateSel Head; + typedef Templates6 Tail; +}; + +template +struct Templates8 { + typedef TemplateSel Head; + typedef Templates7 Tail; +}; + +template +struct Templates9 { + typedef TemplateSel Head; + typedef Templates8 Tail; +}; + +template +struct Templates10 { + typedef TemplateSel Head; + typedef Templates9 Tail; +}; + +template +struct Templates11 { + typedef TemplateSel Head; + typedef Templates10 Tail; +}; + +template +struct Templates12 { + typedef TemplateSel Head; + typedef Templates11 Tail; +}; + +template +struct Templates13 { + typedef TemplateSel Head; + typedef Templates12 Tail; +}; + +template +struct Templates14 { + typedef TemplateSel Head; + typedef Templates13 Tail; +}; + +template +struct Templates15 { + typedef TemplateSel Head; + typedef Templates14 Tail; +}; + +template +struct Templates16 { + typedef TemplateSel Head; + typedef Templates15 Tail; +}; + +template +struct Templates17 { + typedef TemplateSel Head; + typedef Templates16 Tail; +}; + +template +struct Templates18 { + typedef TemplateSel Head; + typedef Templates17 Tail; +}; + +template +struct Templates19 { + typedef TemplateSel Head; + typedef Templates18 Tail; +}; + +template +struct Templates20 { + typedef TemplateSel Head; + typedef Templates19 Tail; +}; + +template +struct Templates21 { + typedef TemplateSel Head; + typedef Templates20 Tail; +}; + +template +struct Templates22 { + typedef TemplateSel Head; + typedef Templates21 Tail; +}; + +template +struct Templates23 { + typedef TemplateSel Head; + typedef Templates22 Tail; +}; + +template +struct Templates24 { + typedef TemplateSel Head; + typedef Templates23 Tail; +}; + +template +struct Templates25 { + typedef TemplateSel Head; + typedef Templates24 Tail; +}; + +template +struct Templates26 { + typedef TemplateSel Head; + typedef Templates25 Tail; +}; + +template +struct Templates27 { + typedef TemplateSel Head; + typedef Templates26 Tail; +}; + +template +struct Templates28 { + typedef TemplateSel Head; + typedef Templates27 Tail; +}; + +template +struct Templates29 { + typedef TemplateSel Head; + typedef Templates28 Tail; +}; + +template +struct Templates30 { + typedef TemplateSel Head; + typedef Templates29 Tail; +}; + +template +struct Templates31 { + typedef TemplateSel Head; + typedef Templates30 Tail; +}; + +template +struct Templates32 { + typedef TemplateSel Head; + typedef Templates31 Tail; +}; + +template +struct Templates33 { + typedef TemplateSel Head; + typedef Templates32 Tail; +}; + +template +struct Templates34 { + typedef TemplateSel Head; + typedef Templates33 Tail; +}; + +template +struct Templates35 { + typedef TemplateSel Head; + typedef Templates34 Tail; +}; + +template +struct Templates36 { + typedef TemplateSel Head; + typedef Templates35 Tail; +}; + +template +struct Templates37 { + typedef TemplateSel Head; + typedef Templates36 Tail; +}; + +template +struct Templates38 { + typedef TemplateSel Head; + typedef Templates37 Tail; +}; + 
+template +struct Templates39 { + typedef TemplateSel Head; + typedef Templates38 Tail; +}; + +template +struct Templates40 { + typedef TemplateSel Head; + typedef Templates39 Tail; +}; + +template +struct Templates41 { + typedef TemplateSel Head; + typedef Templates40 Tail; +}; + +template +struct Templates42 { + typedef TemplateSel Head; + typedef Templates41 Tail; +}; + +template +struct Templates43 { + typedef TemplateSel Head; + typedef Templates42 Tail; +}; + +template +struct Templates44 { + typedef TemplateSel Head; + typedef Templates43 Tail; +}; + +template +struct Templates45 { + typedef TemplateSel Head; + typedef Templates44 Tail; +}; + +template +struct Templates46 { + typedef TemplateSel Head; + typedef Templates45 Tail; +}; + +template +struct Templates47 { + typedef TemplateSel Head; + typedef Templates46 Tail; +}; + +template +struct Templates48 { + typedef TemplateSel Head; + typedef Templates47 Tail; +}; + +template +struct Templates49 { + typedef TemplateSel Head; + typedef Templates48 Tail; +}; + +template +struct Templates50 { + typedef TemplateSel Head; + typedef Templates49 Tail; +}; + + +// We don't want to require the users to write TemplatesN<...> directly, +// as that would require them to count the length. Templates<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Templates +// will appear as Templates in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Templates, and Google Test will translate +// that to TemplatesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Templates template. 
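// Illustrative sketch (not Google Test code): the defaulted-argument trick the
// comment above describes, shrunk to three slots. Users write List<...> with
// up to three types; the nested 'type' member translates it to the
// exact-length ListN. All names here are invented for the demo.
#include <iostream>
#include <typeinfo>

struct None {};   // default "no argument" marker

struct List0 {};
template <typename T1> struct List1 { typedef T1 Head; typedef List0 Tail; };
template <typename T1, typename T2>
struct List2 { typedef T1 Head; typedef List1<T2> Tail; };
template <typename T1, typename T2, typename T3>
struct List3 { typedef T1 Head; typedef List2<T2, T3> Tail; };

// Primary template: all three slots filled.
template <typename T1 = None, typename T2 = None, typename T3 = None>
struct List { typedef List3<T1, T2, T3> type; };

// Specializations translate shorter argument lists to shorter ListN types.
template <> struct List<None, None, None> { typedef List0 type; };
template <typename T1> struct List<T1, None, None> { typedef List1<T1> type; };
template <typename T1, typename T2>
struct List<T1, T2, None> { typedef List2<T1, T2> type; };

int main() {
  typedef List<int, double>::type L;            // translates to List2<int, double>
  std::cout << typeid(L::Head).name() << "\n";  // mangled name of int, e.g. "i"
  return 0;
}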
+template +struct Templates { + typedef Templates50 type; +}; + +template <> +struct Templates { + typedef Templates0 type; +}; +template +struct Templates { + typedef Templates1 type; +}; +template +struct Templates { + typedef Templates2 type; +}; +template +struct Templates { + typedef Templates3 type; +}; +template +struct Templates { + typedef Templates4 type; +}; +template +struct Templates { + typedef Templates5 type; +}; +template +struct Templates { + typedef Templates6 type; +}; +template +struct Templates { + typedef Templates7 type; +}; +template +struct Templates { + typedef Templates8 type; +}; +template +struct Templates { + typedef Templates9 type; +}; +template +struct Templates { + typedef Templates10 type; +}; +template +struct Templates { + typedef Templates11 type; +}; +template +struct Templates { + typedef Templates12 type; +}; +template +struct Templates { + typedef Templates13 type; +}; +template +struct Templates { + typedef Templates14 type; +}; +template +struct Templates { + typedef Templates15 type; +}; +template +struct Templates { + typedef Templates16 type; +}; +template +struct Templates { + typedef Templates17 type; +}; +template +struct Templates { + typedef Templates18 type; +}; +template +struct Templates { + typedef Templates19 type; +}; +template +struct Templates { + typedef Templates20 type; +}; +template +struct Templates { + typedef Templates21 type; +}; +template +struct Templates { + typedef Templates22 type; +}; +template +struct Templates { + typedef Templates23 type; +}; +template +struct Templates { + typedef Templates24 type; +}; +template +struct Templates { + typedef Templates25 type; +}; +template +struct Templates { + typedef Templates26 type; +}; +template +struct Templates { + typedef Templates27 type; +}; +template +struct Templates { + typedef Templates28 type; +}; +template +struct Templates { + typedef Templates29 type; +}; +template +struct Templates { + typedef Templates30 type; +}; +template +struct Templates { + typedef Templates31 type; +}; +template +struct Templates { + typedef Templates32 type; +}; +template +struct Templates { + typedef Templates33 type; +}; +template +struct Templates { + typedef Templates34 type; +}; +template +struct Templates { + typedef Templates35 type; +}; +template +struct Templates { + typedef Templates36 type; +}; +template +struct Templates { + typedef Templates37 type; +}; +template +struct Templates { + typedef Templates38 type; +}; +template +struct Templates { + typedef Templates39 type; +}; +template +struct Templates { + typedef Templates40 type; +}; +template +struct Templates { + typedef Templates41 type; +}; +template +struct Templates { + typedef Templates42 type; +}; +template +struct Templates { + typedef Templates43 type; +}; +template +struct Templates { + typedef Templates44 type; +}; +template +struct Templates { + typedef Templates45 type; +}; +template +struct Templates { + typedef Templates46 type; +}; +template +struct Templates { + typedef Templates47 type; +}; +template +struct Templates { + typedef Templates48 type; +}; +template +struct Templates { + typedef Templates49 type; +}; + +// The TypeList template makes it possible to use either a single type +// or a Types<...> list in TYPED_TEST_CASE() and +// INSTANTIATE_TYPED_TEST_CASE_P(). 
+ +template +struct TypeList { typedef Types1 type; }; + +template +struct TypeList > { + typedef typename Types::type type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +// Google Test defines the testing::Message class to allow construction of +// test messages via the << operator. The idea is that anything +// streamable to std::ostream can be streamed to a testing::Message. +// This allows a user to use his own types in Google Test assertions by +// overloading the << operator. +// +// util/gtl/stl_logging-inl.h overloads << for STL containers. These +// overloads cannot be defined in the std namespace, as that will be +// undefined behavior. Therefore, they are defined in the global +// namespace instead. +// +// C++'s symbol lookup rule (i.e. Koenig lookup) says that these +// overloads are visible in either the std namespace or the global +// namespace, but not other namespaces, including the testing +// namespace which Google Test's Message class is in. +// +// To allow STL containers (and other types that has a << operator +// defined in the global namespace) to be used in Google Test assertions, +// testing::Message must access the custom << operator from the global +// namespace. Hence this helper function. +// +// Note: Jeffrey Yasskin suggested an alternative fix by "using +// ::operator<<;" in the definition of Message's operator<<. That fix +// doesn't require a helper function, but unfortunately doesn't +// compile with MSVC. +template +inline void GTestStreamToHelper(std::ostream* os, const T& val) { + *os << val; +} + +class ProtocolMessage; +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test cases. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class ScopedTrace; // Implements scoped trace. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// How many times InitGoogleTest() has been called. +extern int g_init_gtest_count; + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// A secret type that Google Test users don't know about. It has no +// definition on purpose. Therefore it's impossible to create a +// Secret object, which is what we want. +class Secret; + +// Two overloaded helpers for checking at compile time whether an +// expression is a null pointer literal (i.e. NULL or any 0-valued +// compile-time integral constant). 
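// Illustrative sketch (not Google Test code): why token pasting with __LINE__
// needs the double indirection used by GTEST_CONCAT_TOKEN_ above. The macro
// names MY_CONCAT / MY_CONCAT_IMPL / UNIQUE_COUNTER are invented for the demo.
#include <iostream>

#define MY_CONCAT_IMPL(a, b) a##b
#define MY_CONCAT(a, b) MY_CONCAT_IMPL(a, b)   // expands a and b first

// Declares a uniquely named int on each use, e.g. counter_12, counter_14, ...
#define UNIQUE_COUNTER int MY_CONCAT(counter_, __LINE__)

int main() {
  UNIQUE_COUNTER = 1;   // e.g. int counter_14 = 1;
  UNIQUE_COUNTER = 2;   // a distinct variable, declared on a different line
  // Without the extra indirection, a##__LINE__ would paste to counter___LINE__
  // and the two declarations above would collide.
  std::cout << "ok\n";
  return 0;
}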
Their return values have +// different sizes, so we can use sizeof() to test which version is +// picked by the compiler. These helpers have no implementations, as +// we only need their signatures. +// +// Given IsNullLiteralHelper(x), the compiler will pick the first +// version if x can be implicitly converted to Secret*, and pick the +// second version otherwise. Since Secret is a secret and incomplete +// type, the only expression a user can write that has type Secret* is +// a null pointer literal. Therefore, we know that x is a null +// pointer literal if and only if the first version is picked by the +// compiler. +char IsNullLiteralHelper(Secret* p); +char (&IsNullLiteralHelper(...))[2]; // NOLINT + +// A compile-time bool constant that is true if and only if x is a +// null pointer literal (i.e. NULL or any 0-valued compile-time +// integral constant). +#ifdef GTEST_ELLIPSIS_NEEDS_POD_ +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_IS_NULL_LITERAL_(x) false +#else +# define GTEST_IS_NULL_LITERAL_(x) \ + (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1) +#endif // GTEST_ELLIPSIS_NEEDS_POD_ + +// Appends the user-supplied message to the Google-Test-generated message. +GTEST_API_ String AppendUserMessage(const String& gtest_msg, + const Message& user_msg); + +// A helper class for creating scoped traces in user programs. +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + ScopedTrace(const char* file, int line, const Message& message); + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! + ~ScopedTrace(); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +// Declared here but defined in gtest.h, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. +template +String StreamableToString(const T& streamable); + +// The Symbian compiler has a bug that prevents it from selecting the +// correct overload of FormatForComparisonFailureMessage (see below) +// unless we pass the first argument by reference. If we do that, +// however, Visual Age C++ 10.1 generates a compiler error. Therefore +// we only apply the work-around for Symbian. +#if defined(__SYMBIAN32__) +# define GTEST_CREF_WORKAROUND_ const& +#else +# define GTEST_CREF_WORKAROUND_ +#endif + +// When this operand is a const char* or char*, if the other operand +// is a ::std::string or ::string, we print this operand as a C string +// rather than a pointer (we do the same for wide strings); otherwise +// we print it as a pointer to be safe. + +// This internal macro is used to avoid duplicated code. 
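// Illustrative sketch (not Google Test code): the sizeof-overload trick that
// IsNullLiteralHelper above relies on to detect a null pointer literal at
// compile time. Hidden is an invented incomplete type playing the role of
// Secret; IS_NULL_LITERAL is an invented macro name.
#include <cstddef>
#include <iostream>

class Hidden;                                // incomplete, cannot be created

char IsNullLiteral(Hidden* p);               // picked only for null literals
char (&IsNullLiteral(...))[2];               // picked for everything else

#define IS_NULL_LITERAL(x) (sizeof(IsNullLiteral(x)) == 1)

int main() {
  int value = 0;
  int* ptr = &value;
  std::cout << IS_NULL_LITERAL(NULL) << "\n";   // 1: a null pointer literal
  std::cout << IS_NULL_LITERAL(0) << "\n";      // 1: 0 is a null literal too
  std::cout << IS_NULL_LITERAL(ptr) << "\n";    // 0: an ordinary pointer
  return 0;
}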
+#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\ +inline String FormatForComparisonFailureMessage(\ + operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \ + const operand2_type& /*operand2*/) {\ + return operand1_printer(str);\ +}\ +inline String FormatForComparisonFailureMessage(\ + const operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \ + const operand2_type& /*operand2*/) {\ + return operand1_printer(str);\ +} + +GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted) +#if GTEST_HAS_STD_WSTRING +GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted) +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_STRING +GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted) +#endif // GTEST_HAS_GLOBAL_STRING +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted) +#endif // GTEST_HAS_GLOBAL_WSTRING + +#undef GTEST_FORMAT_IMPL_ + +// The next four overloads handle the case where the operand being +// printed is a char/wchar_t pointer and the other operand is not a +// string/wstring object. In such cases, we just print the operand as +// a pointer to be safe. +#define GTEST_FORMAT_CHAR_PTR_IMPL_(CharType) \ + template \ + String FormatForComparisonFailureMessage(CharType* GTEST_CREF_WORKAROUND_ p, \ + const T&) { \ + return PrintToString(static_cast(p)); \ + } + +GTEST_FORMAT_CHAR_PTR_IMPL_(char) +GTEST_FORMAT_CHAR_PTR_IMPL_(const char) +GTEST_FORMAT_CHAR_PTR_IMPL_(wchar_t) +GTEST_FORMAT_CHAR_PTR_IMPL_(const wchar_t) + +#undef GTEST_FORMAT_CHAR_PTR_IMPL_ + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const String& expected_value, + const String& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ String GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). +// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) +// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. +// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. 
+// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. + static const size_t kFractionBitCount = + std::numeric_limits::digits - 1; + + // # of exponent bits in a number. + static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; + + // The mask for the sign bit. + static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); + + // The mask for the fraction bits. + static const Bits kFractionBitMask = + ~static_cast(0) >> (kExponentBitCount + 1); + + // The mask for the exponent bits. + static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); + + // How many ULP's (Units in the Last Place) we want to tolerate when + // comparing two numbers. The larger the value, the more error we + // allow. A 0 value means that two numbers must be exactly the same + // to be considered equal. + // + // The maximum error of a single floating-point operation is 0.5 + // units in the last place. On Intel CPU's, all floating-point + // calculations are done with 80-bit precision, while double has 64 + // bits. Therefore, 4 should be enough for ordinary use. + // + // See the following article for more details on ULP: + // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm. + static const size_t kMaxUlps = 4; + + // Constructs a FloatingPoint from a raw floating-point number. + // + // On an Intel CPU, passing a non-normalized NAN (Not a Number) + // around may change its bits, although the new value is guaranteed + // to be also a NAN. Therefore, don't expect this constructor to + // preserve the bits in x when x is a NAN. + explicit FloatingPoint(const RawType& x) { u_.value_ = x; } + + // Static methods + + // Reinterprets a bit pattern as a floating-point number. + // + // This function is needed to test the AlmostEquals() method. + static RawType ReinterpretBits(const Bits bits) { + FloatingPoint fp(0); + fp.u_.bits_ = bits; + return fp.u_.value_; + } + + // Returns the floating-point number that represent positive infinity. + static RawType Infinity() { + return ReinterpretBits(kExponentBitMask); + } + + // Non-static methods + + // Returns the bits that represents this number. + const Bits &bits() const { return u_.bits_; } + + // Returns the exponent bits of this number. + Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } + + // Returns the fraction bits of this number. + Bits fraction_bits() const { return kFractionBitMask & u_.bits_; } + + // Returns the sign bit of this number. + Bits sign_bit() const { return kSignBitMask & u_.bits_; } + + // Returns true iff this is NAN (not a number). + bool is_nan() const { + // It's a NAN if the exponent bits are all ones and the fraction + // bits are not entirely zeros. + return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0); + } + + // Returns true iff this number is at most kMaxUlps ULP's away from + // rhs. In particular, this function: + // + // - returns false if either number is (or both are) NAN. + // - treats really large numbers as almost equal to infinity. + // - thinks +0.0 and -0.0 are 0 DLP's apart. 
+ bool AlmostEquals(const FloatingPoint& rhs) const { + // The IEEE standard says that any comparison operation involving + // a NAN must return false. + if (is_nan() || rhs.is_nan()) return false; + + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) + <= kMaxUlps; + } + + private: + // The data type used to store the actual floating-point number. + union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test case, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. Otherwise an overly eager + // compiler (e.g. MSVC 7.1 & 8.0) may try to merge + // TypeIdHelper::dummy_ for different Ts as an "optimization". + static bool dummy_; +}; + +template +bool TypeIdHelper::dummy_ = false; + +// GetTypeId() returns the ID of type T. Different values will be +// returned for different types. Calling the function twice with the +// same type argument is guaranteed to return the same ID. +template +TypeId GetTypeId() { + // The compiler is required to allocate a different + // TypeIdHelper::dummy_ variable for each T used to instantiate + // the template. Therefore, the address of dummy_ is guaranteed to + // be unique. + return &(TypeIdHelper::dummy_); +} + +// Returns the type ID of ::testing::Test. Always call this instead +// of GetTypeId< ::testing::Test>() to get the type ID of +// ::testing::Test, as the latter may give the wrong result due to a +// suspected linker bug when compiling Google Test as a Mac OS X +// framework. 
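// Illustrative sketch (not Google Test code): the ULP-distance comparison that
// the FloatingPoint class above implements, written out for 'float' only.
// Assumes 'unsigned int' is 32 bits wide; the helper names are invented.
#include <cstring>
#include <iostream>

typedef unsigned int Bits;

// Maps a sign-and-magnitude bit pattern to a biased (monotonic) unsigned value.
Bits ToBiased(Bits sam) {
  const Bits kSignMask = static_cast<Bits>(1) << 31;
  return (sam & kSignMask) ? ~sam + 1 : kSignMask | sam;
}

Bits UlpDistance(float a, float b) {
  Bits ba, bb;
  std::memcpy(&ba, &a, sizeof(a));   // reinterpret the bit patterns safely
  std::memcpy(&bb, &b, sizeof(b));
  const Bits x = ToBiased(ba);
  const Bits y = ToBiased(bb);
  return (x >= y) ? x - y : y - x;
}

bool AlmostEqualUlps(float a, float b, Bits max_ulps) {
  if (a != a || b != b) return false;   // NaN is never equal to anything
  return UlpDistance(a, b) <= max_ulps;
}

int main() {
  float sum = 0.0f;
  for (int i = 0; i < 10; ++i) sum += 0.1f;            // accumulates rounding error
  std::cout << (sum == 1.0f) << "\n";                  // 0: exact comparison fails
  std::cout << AlmostEqualUlps(sum, 1.0f, 4) << "\n";  // 1: only about 1 ULP apart
  return 0;
}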
+GTEST_API_ TypeId GetTestTypeId(); + +// Defines the abstract factory interface that creates instances +// of a Test object. +class TestFactoryBase { + public: + virtual ~TestFactoryBase() {} + + // Creates a test instance to run. The instance is both created and destroyed + // within TestInfoImpl::Run() + virtual Test* CreateTest() = 0; + + protected: + TestFactoryBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); +}; + +// This class provides implementation of TeastFactoryBase interface. +// It is used in TEST and TEST_F macros. +template +class TestFactoryImpl : public TestFactoryBase { + public: + virtual Test* CreateTest() { return new TestClass; } +}; + +#if GTEST_OS_WINDOWS + +// Predicate-formatters for implementing the HRESULT checking macros +// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED} +// We pass a long instead of HRESULT to avoid causing an +// include dependency for the HRESULT type. +GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr, + long hr); // NOLINT +GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr, + long hr); // NOLINT + +#endif // GTEST_OS_WINDOWS + +// Types of SetUpTestCase() and TearDownTestCase() functions. +typedef void (*SetUpTestCaseFunc)(); +typedef void (*TearDownTestCaseFunc)(); + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param text representation of the test's value parameter, +// or NULL if this is not a type-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +GTEST_API_ TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory); + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr); + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// State of the definition of a type-parameterized test case. +class GTEST_API_ TypedTestCasePState { + public: + TypedTestCasePState() : registered_(false) {} + + // Adds the given test name to defined_test_names_ and return true + // if the test case hasn't been registered; otherwise aborts the + // program. + bool AddTestName(const char* file, int line, const char* case_name, + const char* test_name) { + if (registered_) { + fprintf(stderr, "%s Test %s must be defined before " + "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n", + FormatFileLocation(file, line).c_str(), test_name, case_name); + fflush(stderr); + posix::Abort(); + } + defined_test_names_.insert(test_name); + return true; + } + + // Verifies that registered_tests match the test names in + // defined_test_names_; returns registered_tests if successful, or + // aborts the program otherwise. 
+ const char* VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests); + + private: + bool registered_; + ::std::set defined_test_names_; +}; + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. +inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == NULL) { + return NULL; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline String GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == NULL ? String(str) : String(str, comma - str); +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase, + // Types). Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const char* case_name, + const char* test_names, int index) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/", + case_name, index).c_str(), + GetPrefixUntilComma(test_names).c_str(), + GetTypeName().c_str(), + NULL, // No value parameter. + GetTypeId(), + TestClass::SetUpTestCase, + TestClass::TearDownTestCase, + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest + ::Register(prefix, case_name, test_names, index + 1); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/, int /*index*/) { + return true; + } +}; + +// TypeParameterizedTestCase::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* prefix, const char* case_name, + const char* test_names) { + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, case_name, test_names, 0); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestCase + ::Register(prefix, case_name, SkipComma(test_names)); + } +}; + +// The base case for the compile time recursion. 
+template +class TypeParameterizedTestCase { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as a String. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, + int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. +GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const UInt32 kMaxRange = 1u << 31; + + explicit Random(UInt32 seed) : state_(seed) {} + + void Reseed(UInt32 seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + UInt32 Generate(UInt32 range); + + private: + UInt32 state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Defining a variable of type CompileAssertTypesEqual will cause a +// compiler error iff T1 and T2 are different types. +template +struct CompileAssertTypesEqual; + +template +struct CompileAssertTypesEqual { +}; + +// Removes the reference from a type if it is a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::remove_reference, which is not widely available yet. +template +struct RemoveReference { typedef T type; }; // NOLINT +template +struct RemoveReference { typedef T type; }; // NOLINT + +// A handy wrapper around RemoveReference that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_REFERENCE_(T) \ + typename ::testing::internal::RemoveReference::type + +// Removes const from a type if it is a const type, otherwise leaves +// it unchanged. This is the same as tr1::remove_const, which is not +// widely available yet. +template +struct RemoveConst { typedef T type; }; // NOLINT +template +struct RemoveConst { typedef T type; }; // NOLINT + +// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above +// definition to fail to remove the const in 'const int[3]' and 'const +// char[3][4]'. The following specialization works around the bug. +// However, it causes trouble with GCC and thus needs to be +// conditionally compiled. 
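+//
+// (Illustrative note, not part of the original header: with the
+// specialization below in place one would expect, for example,
+//
+//   RemoveConst<const int[3]>::type   ==>   int[3]
+//
+// which is exactly the array case the compilers named above otherwise fail
+// to handle.)
+//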
+#if defined(_MSC_VER) || defined(__SUNPRO_CC) || defined(__IBMCPP__) +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; +#endif + +// A handy wrapper around RemoveConst that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_CONST_(T) \ + typename ::testing::internal::RemoveConst::type + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T)) + +// Adds reference to a type if it is not a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::add_reference, which is not widely available yet. +template +struct AddReference { typedef T& type; }; // NOLINT +template +struct AddReference { typedef T& type; }; // NOLINT + +// A handy wrapper around AddReference that works when the argument T +// depends on template parameters. +#define GTEST_ADD_REFERENCE_(T) \ + typename ::testing::internal::AddReference::type + +// Adds a reference to const on top of T as necessary. For example, +// it transforms +// +// char ==> const char& +// const char ==> const char& +// char& ==> const char& +// const char& ==> const char& +// +// The argument T must depend on some template parameters. +#define GTEST_REFERENCE_TO_CONST_(T) \ + GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T)) + +// ImplicitlyConvertible::value is a compile-time bool +// constant that's true iff type From can be implicitly converted to +// type To. +template +class ImplicitlyConvertible { + private: + // We need the following helper functions only for their types. + // They have no implementations. + + // MakeFrom() is an expression whose type is From. We cannot simply + // use From(), as the type From may not have a public default + // constructor. + static From MakeFrom(); + + // These two functions are overloaded. Given an expression + // Helper(x), the compiler will pick the first version if x can be + // implicitly converted to type To; otherwise it will pick the + // second version. + // + // The first version returns a value of size 1, and the second + // version returns a value of size 2. Therefore, by checking the + // size of Helper(x), which can be done at compile time, we can tell + // which version of Helper() is used, and hence whether x can be + // implicitly converted to type To. + static char Helper(To); + static char (&Helper(...))[2]; // NOLINT + + // We have to put the 'public' section after the 'private' section, + // or MSVC refuses to compile the code. + public: + // MSVC warns about implicitly converting from double to int for + // possible loss of data, so we need to temporarily disable the + // warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4244) // Temporarily disables warning 4244. + + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +# pragma warning(pop) // Restores the warning state. +#elif defined(__BORLANDC__) + // C++Builder cannot use member overload resolution during template + // instantiation. The simplest workaround is to use its C++0x type traits + // functions (C++Builder 2009 and above only). 
+ static const bool value = __is_convertible(From, To); +#else + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +#endif // _MSV_VER +}; +template +const bool ImplicitlyConvertible::value; + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true iff T is type ProtocolMessage, proto2::Message, or a subclass +// of those. +template +struct IsAProtocolMessage + : public bool_constant< + ImplicitlyConvertible::value || + ImplicitlyConvertible::value> { +}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// Note that we look for both C::iterator and C::const_iterator. The +// reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. +typedef int IsContainer; +template +IsContainer IsContainerTest(int /* dummy */, + typename C::iterator* /* it */ = NULL, + typename C::const_iterator* /* const_it */ = NULL) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// EnableIf::type is void when 'Cond' is true, and +// undefined when 'Cond' is false. To use SFINAE to make a function +// overload only apply when a particular expression is true, add +// "typename EnableIf::type* = 0" as the last parameter. +template struct EnableIf; +template<> struct EnableIf { typedef void type; }; // NOLINT + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. +template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. 
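+//
+// (Illustrative example, not part of the original header: given
+//
+//   int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
+//   int b[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
+//
+// ArrayEq(a, b) compares the two arrays element by element and yields true;
+// ArrayAwareFind() below relies on the same element-wise comparison when the
+// elements are themselves native arrays.)
+//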
+template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +enum RelationToSource { + kReference, // The NativeArray references the native array. + kCopy // The NativeArray makes a copy of the native array and + // owns the copy. +}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. + typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. + NativeArray(const Element* array, size_t count, RelationToSource relation) { + Init(array, count, relation); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + Init(rhs.array_, rhs.size_, rhs.relation_to_source_); + } + + ~NativeArray() { + // Ensures that the user doesn't instantiate NativeArray with a + // const or reference type. + static_cast(StaticAssertTypeEqHelper()); + if (relation_to_source_ == kCopy) + delete[] array_; + } + + // STL-style container methods. + size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + // Initializes this object; makes a copy of the input array if + // 'relation' is kCopy. 
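+  //
+  // (Illustrative, not part of the original header: for a native array
+  //   int arr[] = { 1, 2, 3 };
+  // constructing NativeArray<int>(arr, 3, kCopy) allocates and owns a copy
+  // of arr, while kReference merely aliases it -- that is the distinction
+  // Init() below implements.)
+  //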
+ void Init(const Element* array, size_t a_size, RelationToSource relation) { + if (relation == kReference) { + array_ = array; + } else { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + } + size_ = a_size; + relation_to_source_ = relation; + } + + const Element* array_; + size_t size_; + RelationToSource relation_to_source_; + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +// Suppresses MSVC warnings 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws.") + +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ + fail("Expected: " #statement " throws an exception.\n" \ + " Actual: it doesn't.") + + +// Implements Boolean test assertions such as EXPECT_TRUE. expression can be +// either a boolean expression or an AssertionResult. 
text is a textual +// represenation of expression as it was passed into the EXPECT_TRUE. +#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage(\ + gtest_ar_, text, #actual, #expected).c_str()) + +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ + fail("Expected: " #statement " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") + +// Expands to the name of the class that implements the given test. +#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + test_case_name##_##test_name##_Test + +// Helper macro for defining tests. +#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\ +class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\ + public:\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\ + private:\ + virtual void TestBody();\ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\ +};\ +\ +::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\ + ::test_info_ =\ + ::testing::internal::MakeAndRegisterTestInfo(\ + #test_case_name, #test_name, NULL, NULL, \ + (parent_id), \ + parent_class::SetUpTestCase, \ + parent_class::TearDownTestCase, \ + new ::testing::internal::TestFactoryImpl<\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\ +void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for death tests. It is +// #included by gtest.h so a user doesn't need to include this +// directly. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines internal utilities needed for implementing +// death tests. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + + +#include + +namespace testing { +namespace internal { + +GTEST_DECLARE_string_(internal_run_death_test); + +// Names of the flags (needed for parsing Google Test flags). +const char kDeathTestStyleFlag[] = "death_test_style"; +const char kDeathTestUseFork[] = "death_test_use_fork"; +const char kInternalRunDeathTestFlag[] = "internal_run_death_test"; + +#if GTEST_HAS_DEATH_TEST + +// DeathTest is a class that hides much of the complexity of the +// GTEST_DEATH_TEST_ macro. 
It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. + +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. + virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. + virtual void Abort(AbortReason reason) = 0; + + // Returns a human-readable outcome message regarding the outcome of + // the last death test. + static const char* LastMessage(); + + static void set_last_death_test_message(const String& message); + + private: + // A string containing a description of the outcome of the last death test. + static String last_death_test_message_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); +}; + +// Factory interface for death tests. May be mocked out for testing. 
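+// (For instance -- illustrative only, not part of the original header -- a
+// test of the death-test machinery itself could substitute a hypothetical
+// MockDeathTestFactory whose Create() simply records its arguments instead
+// of spawning a child process.)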
+class DeathTestFactory { + public: + virtual ~DeathTestFactory() { } + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) = 0; +}; + +// A concrete DeathTestFactory implementation for normal use. +class DefaultDeathTestFactory : public DeathTestFactory { + public: + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); +}; + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +GTEST_API_ bool ExitedUnsuccessfully(int exit_status); + +// Traps C++ exceptions escaping statement and reports them as test +// failures. Note that trapping SEH exceptions is not implemented here. +# if GTEST_HAS_EXCEPTIONS +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const ::std::exception& gtest_exception) { \ + fprintf(\ + stderr, \ + "\n%s: Caught std::exception-derived exception escaping the " \ + "death test statement. Exception message: %s\n", \ + ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \ + gtest_exception.what()); \ + fflush(stderr); \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } catch (...) { \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } + +# else +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) + +# endif + +// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*, +// ASSERT_EXIT*, and EXPECT_EXIT*. +# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + const ::testing::internal::RE& gtest_regex = (regex); \ + ::testing::internal::DeathTest* gtest_dt; \ + if (!::testing::internal::DeathTest::Create(#statement, >est_regex, \ + __FILE__, __LINE__, >est_dt)) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + if (gtest_dt != NULL) { \ + ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \ + gtest_dt_ptr(gtest_dt); \ + switch (gtest_dt->AssumeRole()) { \ + case ::testing::internal::DeathTest::OVERSEE_TEST: \ + if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + break; \ + case ::testing::internal::DeathTest::EXECUTE_TEST: { \ + ::testing::internal::DeathTest::ReturnSentinel \ + gtest_sentinel(gtest_dt); \ + GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \ + gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \ + break; \ + } \ + default: \ + break; \ + } \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \ + fail(::testing::internal::DeathTest::LastMessage()) +// The symbol "fail" here expands to something into which a message +// can be streamed. + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. 
+class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const String& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + String file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + String file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#else // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. Those macros must compile on such systems +// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on +// systems that support death tests. This allows one to write such a macro +// on a system that does not support death tests and be sure that it will +// compile on a death-test supporting system. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter iff EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. +# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_LOG_(WARNING) \ + << "Death tests are not supported on this platform.\n" \ + << "Statement '" #statement "' cannot be verified."; \ + } else if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::RE::PartialMatch(".*", (regex)); \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + terminator; \ + } else \ + ::testing::Message() + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + +namespace testing { + +// This flag controls the style of death tests. 
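+// (It is typically set from the command line, e.g.
+// --gtest_death_test_style=threadsafe, or in code via
+// testing::FLAGS_gtest_death_test_style -- an illustrative usage note, not
+// part of the original comment.)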
Valid values are "threadsafe", +// meaning that the death test child process will re-execute the test binary +// from the start, running only a single death test, or "fast", +// meaning that the child process will execute the test logic immediately +// after forking. +GTEST_DECLARE_string_(death_test_style); + +#if GTEST_HAS_DEATH_TEST + +// The following macros are useful for writing death tests. + +// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is +// executed: +// +// 1. It generates a warning if there is more than one active +// thread. This is because it's safe to fork() or clone() only +// when there is a single thread. +// +// 2. The parent process clone()s a sub-process and runs the death +// test in it; the sub-process exits with code 0 at the end of the +// death test, if it hasn't exited already. +// +// 3. The parent process waits for the sub-process to terminate. +// +// 4. The parent process checks the exit code and error message of +// the sub-process. +// +// Examples: +// +// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number"); +// for (int i = 0; i < 5; i++) { +// EXPECT_DEATH(server.ProcessRequest(i), +// "Invalid request .* in ProcessRequest()") +// << "Failed to die on request " << i); +// } +// +// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting"); +// +// bool KilledBySIGHUP(int exit_code) { +// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP; +// } +// +// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!"); +// +// On the regular expressions used in death tests: +// +// On POSIX-compliant systems (*nix), we use the library, +// which uses the POSIX extended regex syntax. +// +// On other platforms (e.g. Windows), we only support a simple regex +// syntax implemented as part of Google Test. This limited +// implementation should be enough most of the time when writing +// death tests; though it lacks many features you can find in PCRE +// or POSIX extended regex syntax. For example, we don't support +// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and +// repetition count ("x{5,7}"), among others. +// +// Below is the syntax that we do support. We chose it to be a +// subset of both PCRE and POSIX extended regex, so it's easy to +// learn wherever you come from. In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. +// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. 
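+//
+// (Illustrative example, not part of the original header: a death test such
+// as
+//
+//   EXPECT_DEATH(ParsePort(-1), "invalid port: -?\\d+");
+//
+// uses only literal characters plus \\d, '?' and '+' from the subset above,
+// so it behaves the same with both regex implementations. ParsePort is a
+// hypothetical function.)
+//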
+// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// +// TODO(wan@google.com): make thread-safe death tests search the PATH. + +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test case, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test case, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. + void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS +// Tests that an exit code describes an exit due to termination by a +// given signal. +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. +// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. +// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. 
+// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. +// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + do { statement; } while (::testing::internal::AlwaysFalse()) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + do { statement; } while (::testing::internal::AlwaysFalse()) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include + + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. +// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + // We allocate the stringstream separately because otherwise each use of + // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's + // stack frame leading to huge stack frames in some cases; gcc does not reuse + // the stack space. + Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); + } + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + +#if GTEST_OS_SYMBIAN + // Streams a value (either a pointer or not) to this object. + template + inline Message& operator <<(const T& value) { + StreamHelper(typename internal::is_pointer::type(), value); + return *this; + } +#else + // Streams a non-pointer value to this object. + template + inline Message& operator <<(const T& val) { + ::GTestStreamToHelper(ss_.get(), val); + return *this; + } + + // Streams a pointer value to this object. + // + // This function is an overload of the previous one. When you + // stream a pointer to a Message, this definition will be used as it + // is more specialized. (The C++ Standard, section + // [temp.func.order].) If you stream a non-pointer, then the + // previous definition will be used. + // + // The reason for this overload is that streaming a NULL pointer to + // ostream is undefined behavior. Depending on the compiler, you + // may get "0", "(nil)", "(null)", or an access violation. To + // ensure consistent result across compilers, we always treat NULL + // as "(null)". 
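+  //
+  // For instance (illustrative only, not part of the original comment):
+  //
+  //   Message msg;
+  //   msg << static_cast<int*>(NULL) << " and " << 42;
+  //   // msg.GetString() would contain "(null) and 42".
+  //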
+ template + inline Message& operator <<(T* const& pointer) { // NOLINT + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + ::GTestStreamToHelper(ss_.get(), pointer); + } + return *this; + } +#endif // GTEST_OS_SYMBIAN + + // Since the basic IO manipulators are overloaded for both narrow + // and wide streams, we have to provide this specialized definition + // of operator <<, even though its body is the same as the + // templatized version above. Without this definition, streaming + // endl or other basic IO manipulators to Message will confuse the + // compiler. + Message& operator <<(BasicNarrowIoManip val) { + *ss_ << val; + return *this; + } + + // Instead of 1/0, we want to see true/false for bool values. + Message& operator <<(bool b) { + return *this << (b ? "true" : "false"); + } + + // These two overloads allow streaming a wide C string to a Message + // using the UTF-8 encoding. + Message& operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); + } + Message& operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); + } + +#if GTEST_HAS_STD_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::std::wstring& wstr); +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::wstring& wstr); +#endif // GTEST_HAS_GLOBAL_WSTRING + + // Gets the text streamed to this object so far as a String. + // Each '\0' character in the buffer is replaced with "\\0". + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::String GetString() const { + return internal::StringStreamToString(ss_.get()); + } + + private: + +#if GTEST_OS_SYMBIAN + // These are needed as the Nokia Symbian Compiler cannot decide between + // const T& and const T* in a function template. The Nokia compiler _can_ + // decide between class template specializations for T and T*, so a + // tr1::type_traits-like is_pointer works, and we can overload on that. + template + inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) { + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + ::GTestStreamToHelper(ss_.get(), pointer); + } + } + template + inline void StreamHelper(internal::false_type /*dummy*/, const T& value) { + ::GTestStreamToHelper(ss_.get(), value); + } +#endif // GTEST_OS_SYMBIAN + + // We'll hold the text streamed to this object here. + const internal::scoped_ptr< ::std::stringstream> ss_; + + // We declare (but don't implement) this to prevent the compiler + // from implementing the assignment operator. + void operator=(const Message&); +}; + +// Streams a Message to an ostream. +inline std::ostream& operator <<(std::ostream& os, const Message& sb) { + return os << sb.GetString(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +// This file was GENERATED by command: +// pump.py gtest-param-test.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: vladl@google.com (Vlad Losev) +// +// Macros and functions for implementing parameterized tests +// in Google C++ Testing Framework (Google Test) +// +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ + + +// Value-parameterized tests allow you to test your code with different +// parameters without writing multiple copies of the same test. +// +// Here is how you use value-parameterized tests: + +#if 0 + +// To write value-parameterized tests, first you should define a fixture +// class. It is usually derived from testing::TestWithParam (see below for +// another inheritance scheme that's sometimes useful in more complicated +// class hierarchies), where the type of your parameter values. +// TestWithParam is itself derived from testing::Test. T can be any +// copyable type. If it's a raw pointer, you are responsible for managing the +// lifespan of the pointed values. + +class FooTest : public ::testing::TestWithParam { + // You can implement all the usual class fixture members here. +}; + +// Then, use the TEST_P macro to define as many parameterized tests +// for this fixture as you want. The _P suffix is for "parameterized" +// or "pattern", whichever you prefer to think. + +TEST_P(FooTest, DoesBlah) { + // Inside a test, access the test parameter with the GetParam() method + // of the TestWithParam class: + EXPECT_TRUE(foo.Blah(GetParam())); + ... +} + +TEST_P(FooTest, HasBlahBlah) { + ... +} + +// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test +// case with any set of parameters you want. Google Test defines a number +// of functions for generating test parameters. They return what we call +// (surprise!) parameter generators. Here is a summary of them, which +// are all in the testing namespace: +// +// +// Range(begin, end [, step]) - Yields values {begin, begin+step, +// begin+step+step, ...}. The values do not +// include end. 
+//                               include end. step defaults to 1.
+//  Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
+//  ValuesIn(container)        - Yields values from a C-style array, an STL
+//  ValuesIn(begin,end)          container, or an iterator range [begin, end).
+//  Bool()                     - Yields sequence {false, true}.
+//  Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian product
+//                               for the math savvy) of the values generated
+//                               by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+                        FooTest,
+                        Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on the one hand, to adjust generator parameters in
+// order to dynamically determine a set of tests to run and, on the other
+// hand, to inspect the generated tests with the Google Test reflection API
+// before RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface<T>. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+  // You can inherit all the usual members for a non-parameterized test
+  // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+  // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+  // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+  // GetParam works just the same here as if you inherit from TestWithParam<int>.
+  EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif  // 0
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: Dan Egnor (egnor@google.com) +// +// A "smart" pointer type with reference tracking. Every pointer to a +// particular object is kept on a circular linked list. When the last pointer +// to an object is destroyed or reassigned, the object is deleted. +// +// Used properly, this deletes the object when the last reference goes away. +// There are several caveats: +// - Like all reference counting schemes, cycles lead to leaks. +// - Each smart pointer is actually two pointers (8 bytes instead of 4). +// - Every time a pointer is assigned, the entire list of pointers to that +// object is traversed. This class is therefore NOT SUITABLE when there +// will often be more than two or three pointers to a particular object. +// - References are only tracked as long as linked_ptr<> objects are copied. +// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS +// will happen (double deletion). +// +// A good use of this class is storing object references in STL containers. +// You can safely put linked_ptr<> in a vector<>. +// Other uses may not be as good. +// +// Note: If you use an incomplete type with linked_ptr<>, the class +// *containing* linked_ptr<> must have a constructor and destructor (even +// if they do nothing!). +// +// Bill Gibbons suggested we use something like this. +// +// Thread Safety: +// Unlike other linked_ptr implementations, in this implementation +// a linked_ptr object is thread-safe in the sense that: +// - it's safe to copy linked_ptr objects concurrently, +// - it's safe to copy *from* a linked_ptr and read its underlying +// raw pointer (e.g. via get()) concurrently, and +// - it's safe to write to two linked_ptrs that point to the same +// shared object concurrently. +// TODO(wan@google.com): rename this to safe_linked_ptr to avoid +// confusion with normal linked_ptr. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ + +#include +#include + + +namespace testing { +namespace internal { + +// Protects copying of all linked_ptr objects. +GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// This is used internally by all instances of linked_ptr<>. It needs to be +// a non-template class because different types of linked_ptr<> can refer to +// the same object (linked_ptr(obj) vs linked_ptr(obj)). +// So, it needs to be possible for different types of linked_ptr to participate +// in the same circular linked list, so we need a single class type here. +// +// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr. +class linked_ptr_internal { + public: + // Create a new circle that includes only this instance. 
+ void join_new() { + next_ = this; + } + + // Many linked_ptr operations may change p.link_ for some linked_ptr + // variable p in the same circle as this object. Therefore we need + // to prevent two such operations from occurring concurrently. + // + // Note that different types of linked_ptr objects can coexist in a + // circle (e.g. linked_ptr, linked_ptr, and + // linked_ptr). Therefore we must use a single mutex to + // protect all linked_ptr objects. This can create serious + // contention in production code, but is acceptable in a testing + // framework. + + // Join an existing circle. + // L < g_linked_ptr_mutex + void join(linked_ptr_internal const* ptr) { + MutexLock lock(&g_linked_ptr_mutex); + + linked_ptr_internal const* p = ptr; + while (p->next_ != ptr) p = p->next_; + p->next_ = this; + next_ = ptr; + } + + // Leave whatever circle we're part of. Returns true if we were the + // last member of the circle. Once this is done, you can join() another. + // L < g_linked_ptr_mutex + bool depart() { + MutexLock lock(&g_linked_ptr_mutex); + + if (next_ == this) return true; + linked_ptr_internal const* p = next_; + while (p->next_ != this) p = p->next_; + p->next_ = next_; + return false; + } + + private: + mutable linked_ptr_internal const* next_; +}; + +template +class linked_ptr { + public: + typedef T element_type; + + // Take over ownership of a raw pointer. This should happen as soon as + // possible after the object is created. + explicit linked_ptr(T* ptr = NULL) { capture(ptr); } + ~linked_ptr() { depart(); } + + // Copy an existing linked_ptr<>, adding ourselves to the list of references. + template linked_ptr(linked_ptr const& ptr) { copy(&ptr); } + linked_ptr(linked_ptr const& ptr) { // NOLINT + assert(&ptr != this); + copy(&ptr); + } + + // Assignment releases the old value and acquires the new. + template linked_ptr& operator=(linked_ptr const& ptr) { + depart(); + copy(&ptr); + return *this; + } + + linked_ptr& operator=(linked_ptr const& ptr) { + if (&ptr != this) { + depart(); + copy(&ptr); + } + return *this; + } + + // Smart pointer members. + void reset(T* ptr = NULL) { + depart(); + capture(ptr); + } + T* get() const { return value_; } + T* operator->() const { return value_; } + T& operator*() const { return *value_; } + + bool operator==(T* p) const { return value_ == p; } + bool operator!=(T* p) const { return value_ != p; } + template + bool operator==(linked_ptr const& ptr) const { + return value_ == ptr.get(); + } + template + bool operator!=(linked_ptr const& ptr) const { + return value_ != ptr.get(); + } + + private: + template + friend class linked_ptr; + + T* value_; + linked_ptr_internal link_; + + void depart() { + if (link_.depart()) delete value_; + } + + void capture(T* ptr) { + value_ = ptr; + link_.join_new(); + } + + template void copy(linked_ptr const* ptr) { + value_ = ptr->get(); + if (value_) + link_.join(&ptr->link_); + else + link_.join_new(); + } +}; + +template inline +bool operator==(T* ptr, const linked_ptr& x) { + return ptr == x.get(); +} + +template inline +bool operator!=(T* ptr, const linked_ptr& x) { + return ptr != x.get(); +} + +// A function to convert T* into linked_ptr +// Doing e.g. make_linked_ptr(new FooBarBaz(arg)) is a shorter notation +// for linked_ptr >(new FooBarBaz(arg)) +template +linked_ptr make_linked_ptr(T* ptr) { + return linked_ptr(ptr); +} + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +// Copyright 2007, Google Inc. 
+// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// A user can teach this function how to print a class type T by +// defining either operator<<() or PrintTo() in the namespace that +// defines T. More specifically, the FIRST defined function in the +// following list will be used (assuming T is defined in namespace +// foo): +// +// 1. foo::PrintTo(const T&, ostream*) +// 2. operator<<(ostream&, const T&) defined in either foo or the +// global namespace. +// +// If none of the above is defined, it will print the debug string of +// the value if it is a protocol buffer, or print the raw bytes in the +// value otherwise. +// +// To aid debugging: when T is a reference type, the address of the +// value is also printed; when T is a (const) char pointer, both the +// pointer value and the NUL-terminated string it points to are +// printed. +// +// We also provide some convenient wrappers: +// +// // Prints a value to a string. For a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// std::string ::testing::PrintToString(const T& value); +// +// // Prints a value tersely: for a reference type, the referenced +// // value (but not the address) is printed; for a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// void ::testing::internal::UniversalTersePrint(const T& value, ostream*); +// +// // Prints value using the type inferred by the compiler. The difference +// // from UniversalTersePrint() is that this function prints both the +// // pointer and the NUL-terminated string for a (const or not) char pointer. 
+// void ::testing::internal::UniversalPrint(const T& value, ostream*); +// +// // Prints the fields of a tuple tersely to a string vector, one +// // element for each field. Tuple support must be enabled in +// // gtest-port.h. +// std::vector UniversalTersePrintTupleFieldsToStrings( +// const Tuple& value); +// +// Known limitation: +// +// The print primitives print the elements of an STL-style container +// using the compiler-inferred type of *iter where iter is a +// const_iterator of the container. When const_iterator is an input +// iterator but not a forward iterator, this inferred type may not +// match value_type, and the print output may be incorrect. In +// practice, this is rarely a problem as for most containers +// const_iterator is a forward iterator. We'll fix this if there's an +// actual need for it. Note that this fix cannot rely on value_type +// being defined as many user-defined container types don't have +// value_type. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#include // NOLINT +#include +#include +#include +#include + +namespace testing { + +// Definitions in the 'internal' and 'internal2' name spaces are +// subject to change without notice. DO NOT USE THEM IN USER CODE! +namespace internal2 { + +// Prints the given number of bytes in the given object to the given +// ostream. +GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, + size_t count, + ::std::ostream* os); + +// For selecting which printer to use when a given type has neither << +// nor PrintTo(). +enum TypeKind { + kProtobuf, // a protobuf type + kConvertibleToInteger, // a type implicitly convertible to BiggestInt + // (e.g. a named or unnamed enum type) + kOtherType // anything else +}; + +// TypeWithoutFormatter::PrintValue(value, os) is called +// by the universal printer to print a value of type T when neither +// operator<< nor PrintTo() is defined for T, where kTypeKind is the +// "kind" of T as defined by enum TypeKind. +template +class TypeWithoutFormatter { + public: + // This default version is called when kTypeKind is kOtherType. + static void PrintValue(const T& value, ::std::ostream* os) { + PrintBytesInObjectTo(reinterpret_cast(&value), + sizeof(value), os); + } +}; + +// We print a protobuf using its ShortDebugString() when the string +// doesn't exceed this many characters; otherwise we print it using +// DebugString() for better readability. +const size_t kProtobufOneLinerMaxLength = 50; + +template +class TypeWithoutFormatter { + public: + static void PrintValue(const T& value, ::std::ostream* os) { + const ::testing::internal::string short_str = value.ShortDebugString(); + const ::testing::internal::string pretty_str = + short_str.length() <= kProtobufOneLinerMaxLength ? + short_str : ("\n" + value.DebugString()); + *os << ("<" + pretty_str + ">"); + } +}; + +template +class TypeWithoutFormatter { + public: + // Since T has no << operator or PrintTo() but can be implicitly + // converted to BiggestInt, we print it as a BiggestInt. + // + // Most likely T is an enum type (either named or unnamed), in which + // case printing it as an integer is the desired behavior. In case + // T is not an enum, printing it as an integer is the best we can do + // given that it has no user-defined printer. + static void PrintValue(const T& value, ::std::ostream* os) { + const internal::BiggestInt kBigInt = value; + *os << kBigInt; + } +}; + +// Prints the given value to the given ostream. 
If the value is a +// protocol message, its debug string is printed; if it's an enum or +// of a type implicitly convertible to BiggestInt, it's printed as an +// integer; otherwise the bytes in the value are printed. This is +// what UniversalPrinter::Print() does when it knows nothing about +// type T and T has neither << operator nor PrintTo(). +// +// A user can override this behavior for a class type Foo by defining +// a << operator in the namespace where Foo is defined. +// +// We put this operator in namespace 'internal2' instead of 'internal' +// to simplify the implementation, as much code in 'internal' needs to +// use << in STL, which would conflict with our own << were it defined +// in 'internal'. +// +// Note that this operator<< takes a generic std::basic_ostream type instead of the more restricted std::ostream. If +// we define it to take an std::ostream instead, we'll get an +// "ambiguous overloads" compiler error when trying to print a type +// Foo that supports streaming to std::basic_ostream, as the compiler cannot tell whether +// operator<<(std::ostream&, const T&) or +// operator<<(std::basic_stream, const Foo&) is more +// specific. +template +::std::basic_ostream& operator<<( + ::std::basic_ostream& os, const T& x) { + TypeWithoutFormatter::value ? kProtobuf : + internal::ImplicitlyConvertible::value ? + kConvertibleToInteger : kOtherType)>::PrintValue(x, &os); + return os; +} + +} // namespace internal2 +} // namespace testing + +// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up +// magic needed for implementing UniversalPrinter won't work. +namespace testing_internal { + +// Used to print a value that is not an STL-style container when the +// user doesn't define PrintTo() for it. +template +void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) { + // With the following statement, during unqualified name lookup, + // testing::internal2::operator<< appears as if it was declared in + // the nearest enclosing namespace that contains both + // ::testing_internal and ::testing::internal2, i.e. the global + // namespace. For more details, refer to the C++ Standard section + // 7.3.4-1 [namespace.udir]. This allows us to fall back onto + // testing::internal2::operator<< in case T doesn't come with a << + // operator. + // + // We cannot write 'using ::testing::internal2::operator<<;', which + // gcc 3.3 fails to compile due to a compiler bug. + using namespace ::testing::internal2; // NOLINT + + // Assuming T is defined in namespace foo, in the next statement, + // the compiler will consider all of: + // + // 1. foo::operator<< (thanks to Koenig look-up), + // 2. ::operator<< (as the current namespace is enclosed in ::), + // 3. testing::internal2::operator<< (thanks to the using statement above). + // + // The operator<< whose type matches T best will be picked. + // + // We deliberately allow #2 to be a candidate, as sometimes it's + // impossible to define #1 (e.g. when foo is ::std, defining + // anything in it is undefined behavior unless you are a compiler + // vendor.). + *os << value; +} + +} // namespace testing_internal + +namespace testing { +namespace internal { + +// UniversalPrinter::Print(value, ostream_ptr) prints the given +// value to the given ostream. The caller must ensure that +// 'ostream_ptr' is not NULL, or the behavior is undefined. 
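+//
+// An illustrative sketch (assuming a hypothetical user type foo::Bar with a
+// member 'int baz'; not part of the upstream header): defining PrintTo() in
+// Bar's own namespace is enough for the universal printer, and therefore test
+// failure messages, to format Bar readably instead of dumping its raw bytes:
+//
+//   #include <ostream>
+//   namespace foo {
+//   struct Bar { int baz; };
+//   // Found via argument-dependent lookup whenever gtest prints a foo::Bar.
+//   void PrintTo(const Bar& bar, ::std::ostream* os) {
+//     *os << "Bar(baz=" << bar.baz << ")";
+//   }
+//   }  // namespace foo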
+// +// We define UniversalPrinter as a class template (as opposed to a +// function template), as we need to partially specialize it for +// reference types, which cannot be done with function templates. +template +class UniversalPrinter; + +template +void UniversalPrint(const T& value, ::std::ostream* os); + +// Used to print an STL-style container when the user doesn't define +// a PrintTo() for it. +template +void DefaultPrintTo(IsContainer /* dummy */, + false_type /* is not a pointer */, + const C& container, ::std::ostream* os) { + const size_t kMaxCount = 32; // The maximum number of elements to print. + *os << '{'; + size_t count = 0; + for (typename C::const_iterator it = container.begin(); + it != container.end(); ++it, ++count) { + if (count > 0) { + *os << ','; + if (count == kMaxCount) { // Enough has been printed. + *os << " ..."; + break; + } + } + *os << ' '; + // We cannot call PrintTo(*it, os) here as PrintTo() doesn't + // handle *it being a native array. + internal::UniversalPrint(*it, os); + } + + if (count > 0) { + *os << ' '; + } + *os << '}'; +} + +// Used to print a pointer that is neither a char pointer nor a member +// pointer, when the user doesn't define PrintTo() for it. (A member +// variable pointer or member function pointer doesn't really point to +// a location in the address space. Their representation is +// implementation-defined. Therefore they will be printed as raw +// bytes.) +template +void DefaultPrintTo(IsNotContainer /* dummy */, + true_type /* is a pointer */, + T* p, ::std::ostream* os) { + if (p == NULL) { + *os << "NULL"; + } else { + // C++ doesn't allow casting from a function pointer to any object + // pointer. + // + // IsTrue() silences warnings: "Condition is always true", + // "unreachable code". + if (IsTrue(ImplicitlyConvertible::value)) { + // T is not a function type. We just call << to print p, + // relying on ADL to pick up user-defined << for their pointer + // types, if any. + *os << p; + } else { + // T is a function type, so '*os << p' doesn't do what we want + // (it just prints p as bool). We want to print p as a const + // void*. However, we cannot cast it to const void* directly, + // even using reinterpret_cast, as earlier versions of gcc + // (e.g. 3.4.5) cannot compile the cast when p is a function + // pointer. Casting to UInt64 first solves the problem. + *os << reinterpret_cast( + reinterpret_cast(p)); + } + } +} + +// Used to print a non-container, non-pointer value when the user +// doesn't define PrintTo() for it. +template +void DefaultPrintTo(IsNotContainer /* dummy */, + false_type /* is not a pointer */, + const T& value, ::std::ostream* os) { + ::testing_internal::DefaultPrintNonContainerTo(value, os); +} + +// Prints the given value using the << operator if it has one; +// otherwise prints the bytes in it. This is what +// UniversalPrinter::Print() does when PrintTo() is not specialized +// or overloaded for type T. +// +// A user can override this behavior for a class type Foo by defining +// an overload of PrintTo() in the namespace where Foo is defined. We +// give the user this option as sometimes defining a << operator for +// Foo is not desirable (e.g. the coding style may prevent doing it, +// or there is already a << operator but it doesn't do what the user +// wants). +template +void PrintTo(const T& value, ::std::ostream* os) { + // DefaultPrintTo() is overloaded. The type of its first two + // arguments determine which version will be picked. 
If T is an + // STL-style container, the version for container will be called; if + // T is a pointer, the pointer version will be called; otherwise the + // generic version will be called. + // + // Note that we check for container types here, prior to we check + // for protocol message types in our operator<<. The rationale is: + // + // For protocol messages, we want to give people a chance to + // override Google Mock's format by defining a PrintTo() or + // operator<<. For STL containers, other formats can be + // incompatible with Google Mock's format for the container + // elements; therefore we check for container types here to ensure + // that our format is used. + // + // The second argument of DefaultPrintTo() is needed to bypass a bug + // in Symbian's C++ compiler that prevents it from picking the right + // overload between: + // + // PrintTo(const T& x, ...); + // PrintTo(T* x, ...); + DefaultPrintTo(IsContainerTest(0), is_pointer(), value, os); +} + +// The following list of PrintTo() overloads tells +// UniversalPrinter::Print() how to print standard types (built-in +// types, strings, plain arrays, and pointers). + +// Overloads for various char types. +GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os); +GTEST_API_ void PrintTo(signed char c, ::std::ostream* os); +inline void PrintTo(char c, ::std::ostream* os) { + // When printing a plain char, we always treat it as unsigned. This + // way, the output won't be affected by whether the compiler thinks + // char is signed or not. + PrintTo(static_cast(c), os); +} + +// Overloads for other simple built-in types. +inline void PrintTo(bool x, ::std::ostream* os) { + *os << (x ? "true" : "false"); +} + +// Overload for wchar_t type. +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its decimal code (except for L'\0'). +// The L'\0' char is printed as "L'\\0'". The decimal code is printed +// as signed integer when wchar_t is implemented by the compiler +// as a signed type and is printed as an unsigned integer when wchar_t +// is implemented as an unsigned type. +GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os); + +// Overloads for C strings. +GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); +inline void PrintTo(char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// signed/unsigned char is often used for representing binary data, so +// we print pointers to it as void* to be safe. +inline void PrintTo(const signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(const unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// MSVC can be configured to define wchar_t as a typedef of unsigned +// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native +// type. When wchar_t is a typedef, defining an overload for const +// wchar_t* would cause unsigned short* be printed as a wide string, +// possibly causing invalid memory accesses. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Overloads for wide C strings +GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); +inline void PrintTo(wchar_t* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +#endif + +// Overload for C arrays. Multi-dimensional arrays are printed +// properly. 
+ +// Prints the given number of elements in an array, without printing +// the curly braces. +template +void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { + UniversalPrint(a[0], os); + for (size_t i = 1; i != count; i++) { + *os << ", "; + UniversalPrint(a[i], os); + } +} + +// Overloads for ::string and ::std::string. +#if GTEST_HAS_GLOBAL_STRING +GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os); +inline void PrintTo(const ::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +inline void PrintTo(const ::std::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} + +// Overloads for ::wstring and ::std::wstring. +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_TR1_TUPLE +// Overload for ::std::tr1::tuple. Needed for printing function arguments, +// which are packed as tuples. + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os); + +// Overloaded PrintTo() for tuples of various arities. We support +// tuples of up-to 10 fields. The following implementation works +// regardless of whether tr1::tuple is implemented using the +// non-standard variadic template feature or not. + +inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo( + const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} +#endif // GTEST_HAS_TR1_TUPLE + +// Overload for std::pair. +template +void PrintTo(const ::std::pair& value, ::std::ostream* os) { + *os << '('; + // We cannot use UniversalPrint(value.first, os) here, as T1 may be + // a reference type. The same for printing value.second. + UniversalPrinter::Print(value.first, os); + *os << ", "; + UniversalPrinter::Print(value.second, os); + *os << ')'; +} + +// Implements printing a non-reference type T by letting the compiler +// pick the right overload of PrintTo() for T. 
+template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + // Note: we deliberately don't call this PrintTo(), as that name + // conflicts with ::testing::internal::PrintTo in the body of the + // function. + static void Print(const T& value, ::std::ostream* os) { + // By default, ::testing::internal::PrintTo() is used for printing + // the value. + // + // Thanks to Koenig look-up, if T is a class and has its own + // PrintTo() function defined in its namespace, that function will + // be visible here. Since it is more specific than the generic ones + // in ::testing::internal, it will be picked by the compiler in the + // following statement - exactly what we want. + PrintTo(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// UniversalPrintArray(begin, len, os) prints an array of 'len' +// elements, starting at address 'begin'. +template +void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { + if (len == 0) { + *os << "{}"; + } else { + *os << "{ "; + const size_t kThreshold = 18; + const size_t kChunkSize = 8; + // If the array has more than kThreshold elements, we'll have to + // omit some details by printing only the first and the last + // kChunkSize elements. + // TODO(wan@google.com): let the user control the threshold using a flag. + if (len <= kThreshold) { + PrintRawArrayTo(begin, len, os); + } else { + PrintRawArrayTo(begin, kChunkSize, os); + *os << ", ..., "; + PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); + } + *os << " }"; + } +} +// This overload prints a (const) char array compactly. +GTEST_API_ void UniversalPrintArray(const char* begin, + size_t len, + ::std::ostream* os); + +// Implements printing an array type T[N]. +template +class UniversalPrinter { + public: + // Prints the given array, omitting some elements when there are too + // many. + static void Print(const T (&a)[N], ::std::ostream* os) { + UniversalPrintArray(a, N, os); + } +}; + +// Implements printing a reference type T&. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + static void Print(const T& value, ::std::ostream* os) { + // Prints the address of the value. We use reinterpret_cast here + // as static_cast doesn't compile when T is a function type. + *os << "@" << reinterpret_cast(&value) << " "; + + // Then prints the value itself. + UniversalPrint(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// Prints a value tersely: for a reference type, the referenced value +// (but not the address) is printed; for a (const) char pointer, the +// NUL-terminated string (but not the pointer) is printed. 
+template +void UniversalTersePrint(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); +} +inline void UniversalTersePrint(const char* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(string(str), os); + } +} +inline void UniversalTersePrint(char* str, ::std::ostream* os) { + UniversalTersePrint(static_cast(str), os); +} + +// Prints a value using the type inferred by the compiler. The +// difference between this and UniversalTersePrint() is that for a +// (const) char pointer, this prints both the pointer and the +// NUL-terminated string. +template +void UniversalPrint(const T& value, ::std::ostream* os) { + UniversalPrinter::Print(value, os); +} + +#if GTEST_HAS_TR1_TUPLE +typedef ::std::vector Strings; + +// This helper template allows PrintTo() for tuples and +// UniversalTersePrintTupleFieldsToStrings() to be defined by +// induction on the number of tuple fields. The idea is that +// TuplePrefixPrinter::PrintPrefixTo(t, os) prints the first N +// fields in tuple t, and can be defined in terms of +// TuplePrefixPrinter. + +// The inductive case. +template +struct TuplePrefixPrinter { + // Prints the first N fields of a tuple. + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + TuplePrefixPrinter::PrintPrefixTo(t, os); + *os << ", "; + UniversalPrinter::type> + ::Print(::std::tr1::get(t), os); + } + + // Tersely prints the first N fields of a tuple to a string vector, + // one element for each field. + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + TuplePrefixPrinter::TersePrintPrefixToStrings(t, strings); + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Base cases. +template <> +struct TuplePrefixPrinter<0> { + template + static void PrintPrefixTo(const Tuple&, ::std::ostream*) {} + + template + static void TersePrintPrefixToStrings(const Tuple&, Strings*) {} +}; +// We have to specialize the entire TuplePrefixPrinter<> class +// template here, even though the definition of +// TersePrintPrefixToStrings() is the same as the generic version, as +// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't +// support specializing a method template of a class template. +template <> +struct TuplePrefixPrinter<1> { + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + UniversalPrinter::type>:: + Print(::std::tr1::get<0>(t), os); + } + + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get<0>(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os) { + *os << "("; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + PrintPrefixTo(t, os); + *os << ")"; +} + +// Prints the fields of a tuple tersely to a string vector, one +// element for each field. See the comment before +// UniversalTersePrint() for how we define "tersely". 
+template +Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { + Strings result; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + TersePrintPrefixToStrings(value, &result); + return result; +} +#endif // GTEST_HAS_TR1_TUPLE + +} // namespace internal + +template +::std::string PrintToString(const T& value) { + ::std::stringstream ss; + internal::UniversalTersePrint(value, &ss); + return ss.str(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { +namespace internal { + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Outputs a message explaining invalid registration of different +// fixture class for the same test case. This may happen when +// TEST_P macro is used to define two tests with the same name +// but in different namespaces. +GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line); + +template class ParamGeneratorInterface; +template class ParamGenerator; + +// Interface for iterating over elements provided by an implementation +// of ParamGeneratorInterface. +template +class ParamIteratorInterface { + public: + virtual ~ParamIteratorInterface() {} + // A pointer to the base generator instance. + // Used only for the purposes of iterator comparison + // to make sure that two iterators belong to the same generator. + virtual const ParamGeneratorInterface* BaseGenerator() const = 0; + // Advances iterator to point to the next element + // provided by the generator. The caller is responsible + // for not calling Advance() on an iterator equal to + // BaseGenerator()->End(). + virtual void Advance() = 0; + // Clones the iterator object. Used for implementing copy semantics + // of ParamIterator. + virtual ParamIteratorInterface* Clone() const = 0; + // Dereferences the current iterator and provides (read-only) access + // to the pointed value. It is the caller's responsibility not to call + // Current() on an iterator equal to BaseGenerator()->End(). + // Used for implementing ParamGenerator::operator*(). + virtual const T* Current() const = 0; + // Determines whether the given iterator and other point to the same + // element in the sequence generated by the generator. + // Used for implementing ParamGenerator::operator==(). + virtual bool Equals(const ParamIteratorInterface& other) const = 0; +}; + +// Class iterating over elements provided by an implementation of +// ParamGeneratorInterface. It wraps ParamIteratorInterface +// and implements the const forward iterator concept. +template +class ParamIterator { + public: + typedef T value_type; + typedef const T& reference; + typedef ptrdiff_t difference_type; + + // ParamIterator assumes ownership of the impl_ pointer. + ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} + ParamIterator& operator=(const ParamIterator& other) { + if (this != &other) + impl_.reset(other.impl_->Clone()); + return *this; + } + + const T& operator*() const { return *impl_->Current(); } + const T* operator->() const { return impl_->Current(); } + // Prefix version of operator++. + ParamIterator& operator++() { + impl_->Advance(); + return *this; + } + // Postfix version of operator++. 
+ ParamIterator operator++(int /*unused*/) { + ParamIteratorInterface* clone = impl_->Clone(); + impl_->Advance(); + return ParamIterator(clone); + } + bool operator==(const ParamIterator& other) const { + return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_); + } + bool operator!=(const ParamIterator& other) const { + return !(*this == other); + } + + private: + friend class ParamGenerator; + explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} + scoped_ptr > impl_; +}; + +// ParamGeneratorInterface is the binary interface to access generators +// defined in other translation units. +template +class ParamGeneratorInterface { + public: + typedef T ParamType; + + virtual ~ParamGeneratorInterface() {} + + // Generator interface definition + virtual ParamIteratorInterface* Begin() const = 0; + virtual ParamIteratorInterface* End() const = 0; +}; + +// Wraps ParamGeneratorInterface and provides general generator syntax +// compatible with the STL Container concept. +// This class implements copy initialization semantics and the contained +// ParamGeneratorInterface instance is shared among all copies +// of the original object. This is possible because that instance is immutable. +template +class ParamGenerator { + public: + typedef ParamIterator iterator; + + explicit ParamGenerator(ParamGeneratorInterface* impl) : impl_(impl) {} + ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {} + + ParamGenerator& operator=(const ParamGenerator& other) { + impl_ = other.impl_; + return *this; + } + + iterator begin() const { return iterator(impl_->Begin()); } + iterator end() const { return iterator(impl_->End()); } + + private: + linked_ptr > impl_; +}; + +// Generates values from a range of two comparable values. Can be used to +// generate sequences of user-defined types that implement operator+() and +// operator<(). +// This class is used in the Range() function. +template +class RangeGenerator : public ParamGeneratorInterface { + public: + RangeGenerator(T begin, T end, IncrementT step) + : begin_(begin), end_(end), + step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + virtual ~RangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, begin_, 0, step_); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, end_, end_index_, step_); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, T value, int index, + IncrementT step) + : base_(base), value_(value), index_(index), step_(step) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + value_ = value_ + step_; + index_++; + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const T* Current() const { return &value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const int other_index = + CheckedDowncastToActualType(&other)->index_; + return index_ == other_index; + } + + private: + Iterator(const Iterator& other) + : ParamIteratorInterface(), + base_(other.base_), value_(other.value_), index_(other.index_), + step_(other.step_) {} + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + T value_; + int index_; + const IncrementT step_; + }; // class RangeGenerator::Iterator + + static int CalculateEndIndex(const T& begin, + const T& end, + const IncrementT& step) { + int end_index = 0; + for (T i = begin; i < end; i = i + step) + end_index++; + return end_index; + } + + // No implementation - assignment is unsupported. + void operator=(const RangeGenerator& other); + + const T begin_; + const T end_; + const IncrementT step_; + // The index for the end() iterator. All the elements in the generated + // sequence are indexed (0-based) to aid iterator comparison. + const int end_index_; +}; // class RangeGenerator + + +// Generates values from a pair of STL-style iterators. Used in the +// ValuesIn() function. The elements are copied from the source range +// since the source can be located on the stack, and the generator +// is likely to persist beyond that stack frame. +template +class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface { + public: + template + ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end) + : container_(begin, end) {} + virtual ~ValuesInIteratorRangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, container_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, container_.end()); + } + + private: + typedef typename ::std::vector ContainerType; + + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + typename ContainerType::const_iterator iterator) + : base_(base), iterator_(iterator) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + ++iterator_; + value_.reset(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + // We need to use cached value referenced by iterator_ because *iterator_ + // can return a temporary object (and of type other then T), so just + // having "return &*iterator_;" doesn't work. + // value_ is updated here and not in Advance() because Advance() + // can advance iterator_ beyond the end of the range, and we cannot + // detect that fact. The client code, on the other hand, is + // responsible for not calling Current() on an out-of-range iterator. + virtual const T* Current() const { + if (value_.get() == NULL) + value_.reset(new T(*iterator_)); + return value_.get(); + } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + return iterator_ == + CheckedDowncastToActualType(&other)->iterator_; + } + + private: + Iterator(const Iterator& other) + // The explicit constructor call suppresses a false warning + // emitted by gcc when supplied with the -Wextra option. 
+ : ParamIteratorInterface(), + base_(other.base_), + iterator_(other.iterator_) {} + + const ParamGeneratorInterface* const base_; + typename ContainerType::const_iterator iterator_; + // A cached value of *iterator_. We keep it here to allow access by + // pointer in the wrapping iterator's operator->(). + // value_ needs to be mutable to be accessed in Current(). + // Use of scoped_ptr helps manage cached value's lifetime, + // which is bound by the lifespan of the iterator itself. + mutable scoped_ptr value_; + }; // class ValuesInIteratorRangeGenerator::Iterator + + // No implementation - assignment is unsupported. + void operator=(const ValuesInIteratorRangeGenerator& other); + + const ContainerType container_; +}; // class ValuesInIteratorRangeGenerator + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Stores a parameter value and later creates tests parameterized with that +// value. +template +class ParameterizedTestFactory : public TestFactoryBase { + public: + typedef typename TestClass::ParamType ParamType; + explicit ParameterizedTestFactory(ParamType parameter) : + parameter_(parameter) {} + virtual Test* CreateTest() { + TestClass::SetParam(¶meter_); + return new TestClass(); + } + + private: + const ParamType parameter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactoryBase is a base class for meta-factories that create +// test factories for passing into MakeAndRegisterTestInfo function. +template +class TestMetaFactoryBase { + public: + virtual ~TestMetaFactoryBase() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0; +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactory creates test factories for passing into +// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives +// ownership of test factory pointer, same factory object cannot be passed +// into that method twice. But ParameterizedTestCaseInfo is going to call +// it for each Test/Parameter value combination. Thus it needs meta factory +// creator class. +template +class TestMetaFactory + : public TestMetaFactoryBase { + public: + typedef typename TestCase::ParamType ParamType; + + TestMetaFactory() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) { + return new ParameterizedTestFactory(parameter); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfoBase is a generic interface +// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase +// accumulates test information provided by TEST_P macro invocations +// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations +// and uses that information to register all resulting test instances +// in RegisterTests method. The ParameterizeTestCaseRegistry class holds +// a collection of pointers to the ParameterizedTestCaseInfo objects +// and calls RegisterTests() on each of them when asked. +class ParameterizedTestCaseInfoBase { + public: + virtual ~ParameterizedTestCaseInfoBase() {} + + // Base part of test case name for display purposes. + virtual const string& GetTestCaseName() const = 0; + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const = 0; + // UnitTest class invokes this method to register tests in this + // test case right before running them in RUN_ALL_TESTS macro. 
+ // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + virtual void RegisterTests() = 0; + + protected: + ParameterizedTestCaseInfoBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P +// macro invocations for a particular test case and generators +// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that +// test case. It registers tests with all values generated by all +// generators when asked. +template +class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase { + public: + // ParamType and GeneratorCreationFunc are private types but are required + // for declarations of public methods AddTestPattern() and + // AddTestCaseInstantiation(). + typedef typename TestCase::ParamType ParamType; + // A function that returns an instance of appropriate generator type. + typedef ParamGenerator(GeneratorCreationFunc)(); + + explicit ParameterizedTestCaseInfo(const char* name) + : test_case_name_(name) {} + + // Test case base name for display purposes. + virtual const string& GetTestCaseName() const { return test_case_name_; } + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const { return GetTypeId(); } + // TEST_P macro uses AddTestPattern() to record information + // about a single test in a LocalTestInfo structure. + // test_case_name is the base name of the test case (without invocation + // prefix). test_base_name is the name of an individual test without + // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is + // test case base name and DoBar is test base name. + void AddTestPattern(const char* test_case_name, + const char* test_base_name, + TestMetaFactoryBase* meta_factory) { + tests_.push_back(linked_ptr(new TestInfo(test_case_name, + test_base_name, + meta_factory))); + } + // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information + // about a generator. + int AddTestCaseInstantiation(const string& instantiation_name, + GeneratorCreationFunc* func, + const char* /* file */, + int /* line */) { + instantiations_.push_back(::std::make_pair(instantiation_name, func)); + return 0; // Return value used only to run this method in namespace scope. + } + // UnitTest class invokes this method to register tests in this test case + // test cases right before running tests in RUN_ALL_TESTS macro. + // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + // UnitTest has a guard to prevent from calling this method more then once. 
+  virtual void RegisterTests() {
+    for (typename TestInfoContainer::iterator test_it = tests_.begin();
+         test_it != tests_.end(); ++test_it) {
+      linked_ptr<TestInfo> test_info = *test_it;
+      for (typename InstantiationContainer::iterator gen_it =
+               instantiations_.begin(); gen_it != instantiations_.end();
+               ++gen_it) {
+        const string& instantiation_name = gen_it->first;
+        ParamGenerator<ParamType> generator((*gen_it->second)());
+
+        Message test_case_name_stream;
+        if ( !instantiation_name.empty() )
+          test_case_name_stream << instantiation_name << "/";
+        test_case_name_stream << test_info->test_case_base_name;
+
+        int i = 0;
+        for (typename ParamGenerator<ParamType>::iterator param_it =
+                 generator.begin();
+             param_it != generator.end(); ++param_it, ++i) {
+          Message test_name_stream;
+          test_name_stream << test_info->test_base_name << "/" << i;
+          MakeAndRegisterTestInfo(
+              test_case_name_stream.GetString().c_str(),
+              test_name_stream.GetString().c_str(),
+              NULL,  // No type parameter.
+              PrintToString(*param_it).c_str(),
+              GetTestCaseTypeId(),
+              TestCase::SetUpTestCase,
+              TestCase::TearDownTestCase,
+              test_info->test_meta_factory->CreateTestFactory(*param_it));
+        }  // for param_it
+      }  // for gen_it
+    }  // for test_it
+  }  // RegisterTests
+
+ private:
+  // LocalTestInfo structure keeps information about a single test registered
+  // with TEST_P macro.
+  struct TestInfo {
+    TestInfo(const char* a_test_case_base_name,
+             const char* a_test_base_name,
+             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+        test_case_base_name(a_test_case_base_name),
+        test_base_name(a_test_base_name),
+        test_meta_factory(a_test_meta_factory) {}
+
+    const string test_case_base_name;
+    const string test_base_name;
+    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+  };
+  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+  // Keeps pairs of <Instantiation name, Sequence generator creation function>
+  // received from INSTANTIATE_TEST_CASE_P macros.
+  typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
+      InstantiationContainer;
+
+  const string test_case_name_;
+  TestInfoContainer tests_;
+  InstantiationContainer instantiations_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+};  // class ParameterizedTestCaseInfo
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
+// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
+// macros use it to locate their corresponding ParameterizedTestCaseInfo
+// descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+  ParameterizedTestCaseRegistry() {}
+  ~ParameterizedTestCaseRegistry() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      delete *it;
+    }
+  }
+
+  // Looks up or creates and returns a structure containing information about
+  // tests and instantiations of a particular test case.
+  template <class TestCase>
+  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+      const char* test_case_name,
+      const char* file,
+      int line) {
+    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      if ((*it)->GetTestCaseName() == test_case_name) {
+        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+          // Complain about incorrect usage of Google Test facilities
+          // and terminate the program since we cannot guarantee correct
+          // test case setup and tear-down in this case.
+          ReportInvalidTestCaseType(test_case_name, file, line);
+          posix::Abort();
+        } else {
+          // At this point we are sure that the object we found is of the same
+          // type we are looking for, so we downcast it to that type
+          // without further checks.
+          typed_test_info = CheckedDowncastToActualType<
+              ParameterizedTestCaseInfo<TestCase> >(*it);
+        }
+        break;
+      }
+    }
+    if (typed_test_info == NULL) {
+      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
+      test_case_infos_.push_back(typed_test_info);
+    }
+    return typed_test_info;
+  }
+  void RegisterTests() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      (*it)->RegisterTests();
+    }
+  }
+
+ private:
+  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+  TestCaseInfoContainer test_case_infos_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  //  GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+//     pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tr1::tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
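+// For orientation, a minimal usage sketch (hypothetical user code, not part
+// of this generated header): the ValueArrayN helpers below are what the
+// public Values() factory expands to, and each converts to a
+// ParamGenerator<T> via ValuesIn().
+//
+//   class FooTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(FooTest, AcceptsValue) {
+//     EXPECT_GE(GetParam(), 0);  // GetParam() yields the current parameter.
+//   }
+//
+//   // Values(1, 2, 3) builds a ValueArray3<int, int, int>; the registry
+//   // defined above then registers Small/FooTest.AcceptsValue/0..2.
+//   INSTANTIATE_TEST_CASE_P(Small, FooTest, ::testing::Values(1, 2, 3));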
+ +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Forward declarations of ValuesIn(), which is implemented in +// include/gtest/gtest-param-test.h. +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end); + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]); + +template +internal::ParamGenerator ValuesIn( + const Container& container); + +namespace internal { + +// Used in the Values() function to provide polymorphic capabilities. +template +class ValueArray1 { + public: + explicit ValueArray1(T1 v1) : v1_(v1) {} + + template + operator ParamGenerator() const { return ValuesIn(&v1_, &v1_ + 1); } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray1& other); + + const T1 v1_; +}; + +template +class ValueArray2 { + public: + ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray2& other); + + const T1 v1_; + const T2 v2_; +}; + +template +class ValueArray3 { + public: + ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray3& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; +}; + +template +class ValueArray4 { + public: + ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray4& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; +}; + +template +class ValueArray5 { + public: + ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray5& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; +}; + +template +class ValueArray6 { + public: + ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray6& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; +}; + +template +class ValueArray7 { + public: + ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray7& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; +}; + +template +class ValueArray8 { + public: + ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray8& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; +}; + +template +class ValueArray9 { + public: + ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray9& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; +}; + +template +class ValueArray10 { + public: + ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray10& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; +}; + +template +class ValueArray11 { + public: + ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray11& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; +}; + +template +class ValueArray12 { + public: + ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray12& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; +}; + +template +class ValueArray13 { + public: + ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray13& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; +}; + +template +class ValueArray14 { + public: + ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray14& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; +}; + +template +class ValueArray15 { + public: + ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray15& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; +}; + +template +class ValueArray16 { + public: + ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray16& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; +}; + +template +class ValueArray17 { + public: + ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray17& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; +}; + +template +class ValueArray18 { + public: + ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray18& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; +}; + +template +class ValueArray19 { + public: + ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray19& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; +}; + +template +class ValueArray20 { + public: + ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray20& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; +}; + +template +class ValueArray21 { + public: + ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray21& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; +}; + +template +class ValueArray22 { + public: + ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray22& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; +}; + +template +class ValueArray23 { + public: + ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, + v23_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray23& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; +}; + +template +class ValueArray24 { + public: + ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray24& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; +}; + +template +class ValueArray25 { + public: + ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray25& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; +}; + +template +class ValueArray26 { + public: + ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray26& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; +}; + +template +class ValueArray27 { + public: + ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray27& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; +}; + +template +class ValueArray28 { + public: + ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray28& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; +}; + +template +class ValueArray29 { + public: + ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray29& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; +}; + +template +class ValueArray30 { + public: + ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray30& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; +}; + +template +class ValueArray31 { + public: + ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray31& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; +}; + +template +class ValueArray32 { + public: + ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray32& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; +}; + +template +class ValueArray33 { + public: + ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray33& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; +}; + +template +class ValueArray34 { + public: + ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray34& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; +}; + +template +class ValueArray35 { + public: + ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, + v35_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray35& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; +}; + +template +class ValueArray36 { + public: + ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment 
is unsupported. + void operator=(const ValueArray36& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; +}; + +template +class ValueArray37 { + public: + ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray37& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; +}; + +template +class ValueArray38 { + public: + ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray38& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; +}; + +template +class ValueArray39 { + public: + ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray39& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; +}; + +template +class ValueArray40 { + public: + ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray40& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; +}; + +template +class ValueArray41 { + public: + ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray41& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; +}; + +template +class ValueArray42 { + public: + ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray42& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; +}; + +template +class ValueArray43 { + public: + ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), + v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray43& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; +}; + +template +class ValueArray44 { + public: + ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), + v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), + v43_(v43), v44_(v44) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray44& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; +}; + +template +class ValueArray45 { + public: + ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), + v42_(v42), v43_(v43), v44_(v44), v45_(v45) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray45& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; +}; + +template +class ValueArray46 { + public: + ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray46& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; +}; + +template +class ValueArray47 { + public: + ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46), + v47_(v47) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, + v47_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray47& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; +}; + +template +class ValueArray48 { + public: + ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), + v46_(v46), v47_(v47), v48_(v48) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray48& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; +}; + +template +class ValueArray49 { + public: + ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, + T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_, v49_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray49& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; +}; + +template +class ValueArray50 { + public: + ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49, + T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {} + + template + operator ParamGenerator() const { + const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, + v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_, + v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_, + v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_, + v48_, v49_, v50_}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray50& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; + const T50 v50_; +}; + +# if GTEST_HAS_COMBINE +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Generates values from the Cartesian product of values produced +// by the argument generators. 
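+//
+// A minimal usage sketch (illustrative only, not part of this header;
+// it assumes the public ::testing::Combine / ::testing::Values /
+// ::testing::Bool generators declared elsewhere in gtest, which are
+// what ultimately instantiate the CartesianProductGenerator classes
+// defined below):
+//
+//   class MyTest
+//       : public ::testing::TestWithParam< ::std::tr1::tuple<int, bool> > {};
+//
+//   TEST_P(MyTest, Works) {
+//     const int  n = ::std::tr1::get<0>(GetParam());
+//     const bool b = ::std::tr1::get<1>(GetParam());
+//     EXPECT_TRUE(b || n >= 0);
+//   }
+//
+//   // Runs MyTest.Works once for every (n, b) pair in {1,2,3} x {false,true}.
+//   INSTANTIATE_TEST_CASE_P(Pairs, MyTest,
+//       ::testing::Combine(::testing::Values(1, 2, 3),
+//                          ::testing::Bool()));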
+// +template +class CartesianProductGenerator2 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator2(const ParamGenerator& g1, + const ParamGenerator& g2) + : g1_(g1), g2_(g2) {} + virtual ~CartesianProductGenerator2() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current2_; + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + ParamType current_value_; + }; // class CartesianProductGenerator2::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator2& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; +}; // class CartesianProductGenerator2 + + +template +class CartesianProductGenerator3 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator3(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + virtual ~CartesianProductGenerator3() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current3_; + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + ParamType current_value_; + }; // class CartesianProductGenerator3::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator3& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; +}; // class CartesianProductGenerator3 + + +template +class CartesianProductGenerator4 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator4(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + virtual ~CartesianProductGenerator4() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current4_; + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + ParamType current_value_; + }; // class CartesianProductGenerator4::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator4& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; +}; // class CartesianProductGenerator4 + + +template +class CartesianProductGenerator5 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator5(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + virtual ~CartesianProductGenerator5() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current5_; + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + ParamType current_value_; + }; // class CartesianProductGenerator5::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator5& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; +}; // class CartesianProductGenerator5 + + +template +class CartesianProductGenerator6 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator6(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + virtual ~CartesianProductGenerator6() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current6_; + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + ParamType current_value_; + }; // class CartesianProductGenerator6::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator6& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; +}; // class CartesianProductGenerator6 + + +template +class CartesianProductGenerator7 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator7(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + virtual ~CartesianProductGenerator7() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current7_; + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. 
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + ParamType current_value_; + }; // class CartesianProductGenerator7::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator7& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; +}; // class CartesianProductGenerator7 + + +template +class CartesianProductGenerator8 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator8(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + virtual ~CartesianProductGenerator8() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current8_; + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + ParamType current_value_; + }; // class CartesianProductGenerator8::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator8& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; +}; // class CartesianProductGenerator8 + + +template +class CartesianProductGenerator9 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator9(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + virtual ~CartesianProductGenerator9() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9) + : base_(base), + 
begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current9_; + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + ParamType current_value_; + }; // class CartesianProductGenerator9::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator9& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; +}; // class CartesianProductGenerator9 + + +template +class CartesianProductGenerator10 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator10(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9, + const ParamGenerator& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + virtual ~CartesianProductGenerator10() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end(), g10_, g10_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9, + const ParamGenerator& g10, + const typename ParamGenerator::iterator& current10) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9), + begin10_(g10.begin()), end10_(g10.end()), current10_(current10) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current10_; + if (current10_ == end10_) { + current10_ = begin10_; + ++current9_; + } + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_ && + current10_ == typed_other->current10_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_), + begin10_(other.begin10_), + end10_(other.end10_), + current10_(other.current10_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_, *current10_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_ || + current10_ == end10_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + const typename ParamGenerator::iterator begin10_; + const typename ParamGenerator::iterator end10_; + typename ParamGenerator::iterator current10_; + ParamType current_value_; + }; // class CartesianProductGenerator10::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator10& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; + const ParamGenerator g10_; +}; // class CartesianProductGenerator10 + + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Helper classes providing Combine() with polymorphic features. They allow +// casting CartesianProductGeneratorN to ParamGenerator if T is +// convertible to U. +// +template +class CartesianProductHolder2 { + public: +CartesianProductHolder2(const Generator1& g1, const Generator2& g2) + : g1_(g1), g2_(g2) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator2( + static_cast >(g1_), + static_cast >(g2_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder2& other); + + const Generator1 g1_; + const Generator2 g2_; +}; // class CartesianProductHolder2 + +template +class CartesianProductHolder3 { + public: +CartesianProductHolder3(const Generator1& g1, const Generator2& g2, + const Generator3& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator3( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder3& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; +}; // class CartesianProductHolder3 + +template +class CartesianProductHolder4 { + public: +CartesianProductHolder4(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator4( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder4& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; +}; // class CartesianProductHolder4 + +template +class CartesianProductHolder5 { + public: +CartesianProductHolder5(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator5( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder5& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; +}; // class CartesianProductHolder5 + +template +class CartesianProductHolder6 { + public: +CartesianProductHolder6(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator6( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder6& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; +}; // class CartesianProductHolder6 + +template +class CartesianProductHolder7 { + public: +CartesianProductHolder7(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator7( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder7& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; +}; // class CartesianProductHolder7 + +template +class CartesianProductHolder8 { + public: +CartesianProductHolder8(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator8( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder8& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; +}; // class CartesianProductHolder8 + +template +class CartesianProductHolder9 { + public: +CartesianProductHolder9(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator9( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder9& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; +}; // class CartesianProductHolder9 + +template +class CartesianProductHolder10 { + public: +CartesianProductHolder10(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9, const Generator10& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator10( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_), + static_cast >(g10_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder10& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; + const Generator10 g10_; +}; // class CartesianProductHolder10 + +# endif // GTEST_HAS_COMBINE + +} // namespace internal +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Functions producing parameter generators. +// +// Google Test uses these generators to produce parameters for value- +// parameterized tests. When a parameterized test case is instantiated +// with a particular generator, Google Test creates and runs tests +// for each element in the sequence produced by the generator. +// +// In the following sample, tests from test case FooTest are instantiated +// each three times with parameter values 3, 5, and 8: +// +// class FooTest : public TestWithParam { ... }; +// +// TEST_P(FooTest, TestThis) { +// } +// TEST_P(FooTest, TestThat) { +// } +// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8)); +// + +// Range() returns generators providing sequences of values in a range. +// +// Synopsis: +// Range(start, end) +// - returns a generator producing a sequence of values {start, start+1, +// start+2, ..., }. +// Range(start, end, step) +// - returns a generator producing a sequence of values {start, start+step, +// start+step+step, ..., }. +// Notes: +// * The generated sequences never include end. For example, Range(1, 5) +// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2) +// returns a generator producing {1, 3, 5, 7}. +// * start and end must have the same type. That type may be any integral or +// floating-point type or a user defined type satisfying these conditions: +// * It must be assignable (have operator=() defined). +// * It must have operator+() (operator+(int-compatible type) for +// two-operand version). +// * It must have operator<() defined. +// Elements in the resulting sequences will also have that type. +// * Condition start < end must be satisfied in order for resulting sequences +// to contain any elements. +// +template +internal::ParamGenerator Range(T start, T end, IncrementT step) { + return internal::ParamGenerator( + new internal::RangeGenerator(start, end, step)); +} + +template +internal::ParamGenerator Range(T start, T end) { + return Range(start, end, 1); +} + +// ValuesIn() function allows generation of tests with parameters coming from +// a container. +// +// Synopsis: +// ValuesIn(const T (&array)[N]) +// - returns a generator producing sequences with elements from +// a C-style array. +// ValuesIn(const Container& container) +// - returns a generator producing sequences with elements from +// an STL-style container. +// ValuesIn(Iterator begin, Iterator end) +// - returns a generator producing sequences with elements from +// a range [begin, end) defined by a pair of STL-style iterators. These +// iterators can also be plain C pointers. +// +// Please note that ValuesIn copies the values from the containers +// passed in and keeps them to generate tests in RUN_ALL_TESTS(). 
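+//
+// As an illustrative sketch (MyTest and MakeParams() below are hypothetical —
+// a param-test fixture and a factory returning a ::std::vector of parameters —
+// not part of Google Test), this copying behavior means it is safe to pass a
+// temporary container:
+//
+//   INSTANTIATE_TEST_CASE_P(OnTheFly, MyTest, ValuesIn(MakeParams()));
+//
+// because the elements are copied out of the temporary before it is destroyed.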
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+//   ::std::vector< ::std::string> v;
+//   v.push_back("a");
+//   v.push_back("b");
+//   return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+//                         StlStringTest,
+//                         ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+//   ::std::list<char> list;
+//   list.push_back('a');
+//   list.push_back('b');
+//   return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+//                         CharTest,
+//                         ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+  typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+      ::value_type ParamType;
+  return internal::ParamGenerator<ParamType>(
+      new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+  return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+    const Container& container) {
+  return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+//   - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
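+//
+// A minimal end-to-end sketch (the fixture and test names here are
+// hypothetical, not part of Google Test) showing how the generated values
+// reach the test body through GetParam():
+//
+//   class SmallIntTest : public testing::TestWithParam<int> {};
+//
+//   TEST_P(SmallIntTest, IsPositive) {
+//     EXPECT_GT(GetParam(), 0);  // runs once for each of 1, 2, and 3
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(SmallInts, SmallIntTest, Values(1, 2, 3));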
+// +template +internal::ValueArray1 Values(T1 v1) { + return internal::ValueArray1(v1); +} + +template +internal::ValueArray2 Values(T1 v1, T2 v2) { + return internal::ValueArray2(v1, v2); +} + +template +internal::ValueArray3 Values(T1 v1, T2 v2, T3 v3) { + return internal::ValueArray3(v1, v2, v3); +} + +template +internal::ValueArray4 Values(T1 v1, T2 v2, T3 v3, T4 v4) { + return internal::ValueArray4(v1, v2, v3, v4); +} + +template +internal::ValueArray5 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5) { + return internal::ValueArray5(v1, v2, v3, v4, v5); +} + +template +internal::ValueArray6 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6) { + return internal::ValueArray6(v1, v2, v3, v4, v5, v6); +} + +template +internal::ValueArray7 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7) { + return internal::ValueArray7(v1, v2, v3, v4, v5, + v6, v7); +} + +template +internal::ValueArray8 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) { + return internal::ValueArray8(v1, v2, v3, v4, + v5, v6, v7, v8); +} + +template +internal::ValueArray9 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) { + return internal::ValueArray9(v1, v2, v3, + v4, v5, v6, v7, v8, v9); +} + +template +internal::ValueArray10 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) { + return internal::ValueArray10(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10); +} + +template +internal::ValueArray11 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) { + return internal::ValueArray11(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11); +} + +template +internal::ValueArray12 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) { + return internal::ValueArray12(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12); +} + +template +internal::ValueArray13 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) { + return internal::ValueArray13(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13); +} + +template +internal::ValueArray14 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) { + return internal::ValueArray14(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14); +} + +template +internal::ValueArray15 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) { + return internal::ValueArray15(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15); +} + +template +internal::ValueArray16 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16) { + return internal::ValueArray16(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16); +} + +template +internal::ValueArray17 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17) { + return internal::ValueArray17(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17); +} + +template +internal::ValueArray18 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18) { + return internal::ValueArray18(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18); +} + +template +internal::ValueArray19 
Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) { + return internal::ValueArray19(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19); +} + +template +internal::ValueArray20 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) { + return internal::ValueArray20(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20); +} + +template +internal::ValueArray21 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) { + return internal::ValueArray21(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21); +} + +template +internal::ValueArray22 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22) { + return internal::ValueArray22(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22); +} + +template +internal::ValueArray23 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23) { + return internal::ValueArray23(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23); +} + +template +internal::ValueArray24 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24) { + return internal::ValueArray24(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24); +} + +template +internal::ValueArray25 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) { + return internal::ValueArray25(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25); +} + +template +internal::ValueArray26 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) { + return internal::ValueArray26(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26); +} + +template +internal::ValueArray27 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) { + return internal::ValueArray27(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27); +} + +template +internal::ValueArray28 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 
v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) { + return internal::ValueArray28(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28); +} + +template +internal::ValueArray29 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) { + return internal::ValueArray29(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29); +} + +template +internal::ValueArray30 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) { + return internal::ValueArray30(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30); +} + +template +internal::ValueArray31 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) { + return internal::ValueArray31(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31); +} + +template +internal::ValueArray32 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32) { + return internal::ValueArray32(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32); +} + +template +internal::ValueArray33 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33) { + return internal::ValueArray33(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33); +} + +template +internal::ValueArray34 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34) { + return internal::ValueArray34(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34); +} + +template +internal::ValueArray35 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, 
T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) { + return internal::ValueArray35(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35); +} + +template +internal::ValueArray36 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) { + return internal::ValueArray36(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36); +} + +template +internal::ValueArray37 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37) { + return internal::ValueArray37(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37); +} + +template +internal::ValueArray38 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38) { + return internal::ValueArray38(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, + v33, v34, v35, v36, v37, v38); +} + +template +internal::ValueArray39 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38, T39 v39) { + return internal::ValueArray39(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, + v32, v33, v34, v35, v36, v37, v38, v39); +} + +template +internal::ValueArray40 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, + T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, + T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) { + return internal::ValueArray40(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, + v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40); +} + +template +internal::ValueArray41 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + 
T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) { + return internal::ValueArray41(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, + v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41); +} + +template +internal::ValueArray42 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) { + return internal::ValueArray42(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, + v42); +} + +template +internal::ValueArray43 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) { + return internal::ValueArray43(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, + v41, v42, v43); +} + +template +internal::ValueArray44 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) { + return internal::ValueArray44(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, + v40, v41, v42, v43, v44); +} + +template +internal::ValueArray45 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) { + return internal::ValueArray45(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, + v39, v40, v41, v42, v43, v44, v45); +} + +template +internal::ValueArray46 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 
v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) { + return internal::ValueArray46(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46); +} + +template +internal::ValueArray47 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) { + return internal::ValueArray47(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46, v47); +} + +template +internal::ValueArray48 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, + T48 v48) { + return internal::ValueArray48(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, + v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48); +} + +template +internal::ValueArray49 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, + T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, + T47 v47, T48 v48, T49 v49) { + return internal::ValueArray49(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, + v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49); +} + +template +internal::ValueArray50 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, + T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, + T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) { + return internal::ValueArray50(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, + v48, v49, v50); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. 
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example, all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+//   virtual void SetUp() {
+//     external_flag = GetParam();
+//   }
+// }
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+  return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+//   - returns a generator producing sequences with elements coming from
+//     the Cartesian product of elements from the sequences generated by
+//     gen1, gen2, ..., genN. The sequence elements will have a type of
+//     tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//     of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+//     : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+//                         Combine(Values("cat", "dog"),
+//                                 Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+//     : public testing::TestWithParam<tuple<bool, bool> > {
+//   virtual void SetUp() {
+//     // Assigns external_flag_1 and external_flag_2 values from the tuple.
+//     tie(external_flag_1, external_flag_2) = GetParam();
+//   }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+//   // Test your code using external_flag_1 and external_flag_2 here.
+// } +// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest, +// Combine(Bool(), Bool())); +// +template +internal::CartesianProductHolder2 Combine( + const Generator1& g1, const Generator2& g2) { + return internal::CartesianProductHolder2( + g1, g2); +} + +template +internal::CartesianProductHolder3 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3) { + return internal::CartesianProductHolder3( + g1, g2, g3); +} + +template +internal::CartesianProductHolder4 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4) { + return internal::CartesianProductHolder4( + g1, g2, g3, g4); +} + +template +internal::CartesianProductHolder5 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5) { + return internal::CartesianProductHolder5( + g1, g2, g3, g4, g5); +} + +template +internal::CartesianProductHolder6 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6) { + return internal::CartesianProductHolder6( + g1, g2, g3, g4, g5, g6); +} + +template +internal::CartesianProductHolder7 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7) { + return internal::CartesianProductHolder7( + g1, g2, g3, g4, g5, g6, g7); +} + +template +internal::CartesianProductHolder8 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8) { + return internal::CartesianProductHolder8( + g1, g2, g3, g4, g5, g6, g7, g8); +} + +template +internal::CartesianProductHolder9 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9) { + return internal::CartesianProductHolder9( + g1, g2, g3, g4, g5, g6, g7, g8, g9); +} + +template +internal::CartesianProductHolder10 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9, + const Generator10& g10) { + return internal::CartesianProductHolder10( + g1, g2, g3, g4, g5, g6, g7, g8, g9, g10); +} +# endif // GTEST_HAS_COMBINE + + + +# define TEST_P(test_case_name, test_name) \ + class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + : public test_case_name { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \ + virtual void TestBody(); \ + private: \ + static int AddToRegistry() { \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestPattern(\ + #test_case_name, \ + #test_name, \ + new ::testing::internal::TestMetaFactory< \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \ + return 0; \ + } \ + static int gtest_registering_dummy_; \ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \ + }; \ + int GTEST_TEST_CLASS_NAME_(test_case_name, \ + test_name)::gtest_registering_dummy_ = \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \ + void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \ + ::testing::internal::ParamGenerator \ + gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \ + int gtest_##prefix##test_case_name##_dummy_ = \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\ + #prefix, \ + >est_##prefix##test_case_name##_EvalGenerator_, \ + __FILE__, __LINE__) + +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Google C++ Testing Framework definitions useful in production code. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_ + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void MyMethod(); +// FRIEND_TEST(MyClassTest, MyMethod); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, MyMethod) { +// // Can call MyClass::MyMethod() here. 
+// } + +#define FRIEND_TEST(test_case_name, test_name)\ +friend class test_case_name##_##test_name##_Test + +#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ + +#include +#include + +namespace testing { + +// A copyable object representing the result of a test part (i.e. an +// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()). +// +// Don't inherit from TestPartResult as its destructor is not virtual. +class GTEST_API_ TestPartResult { + public: + // The possible outcomes of a test part (i.e. an assertion or an + // explicit SUCCEED(), FAIL(), or ADD_FAILURE()). + enum Type { + kSuccess, // Succeeded. + kNonFatalFailure, // Failed but the test can continue. + kFatalFailure // Failed and the test should be terminated. + }; + + // C'tor. TestPartResult does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestPartResult object. + TestPartResult(Type a_type, + const char* a_file_name, + int a_line_number, + const char* a_message) + : type_(a_type), + file_name_(a_file_name), + line_number_(a_line_number), + summary_(ExtractSummary(a_message)), + message_(a_message) { + } + + // Gets the outcome of the test part. + Type type() const { return type_; } + + // Gets the name of the source file where the test part took place, or + // NULL if it's unknown. + const char* file_name() const { return file_name_.c_str(); } + + // Gets the line in the source file where the test part took place, + // or -1 if it's unknown. + int line_number() const { return line_number_; } + + // Gets the summary of the failure message. + const char* summary() const { return summary_.c_str(); } + + // Gets the message associated with the test part. + const char* message() const { return message_.c_str(); } + + // Returns true iff the test part passed. 
+ bool passed() const { return type_ == kSuccess; } + + // Returns true iff the test part failed. + bool failed() const { return type_ != kSuccess; } + + // Returns true iff the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true iff the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static internal::String ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // NULL if the source file is unknown. + internal::String file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + internal::String summary_; // The test failure summary. + internal::String message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. +class TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + virtual ~HasNewFatalFailureHelper(); + virtual void ReportTestPartResult(const TestPartResult& result); + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// This header implements typed tests and type-parameterized tests. + +// Typed (aka type-driven) tests repeat the same test for types in a +// list. You must know which types you want to test with when writing +// typed tests. Here's how you do it: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + public: + ... + typedef std::list List; + static T shared_; + T value_; +}; + +// Next, associate a list of types with the test case, which will be +// repeated for each type in the list. The typedef is necessary for +// the macro to parse correctly. +typedef testing::Types MyTypes; +TYPED_TEST_CASE(FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// TYPED_TEST_CASE(FooTest, int); + +// Then, use TYPED_TEST() instead of TEST_F() to define as many typed +// tests for this test case as you want. +TYPED_TEST(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + // Since we are inside a derived class template, C++ requires use to + // visit the members of FooTest via 'this'. + TypeParam n = this->value_; + + // To visit static members of the fixture, add the TestFixture:: + // prefix. + n += TestFixture::shared_; + + // To refer to typedefs in the fixture, add the "typename + // TestFixture::" prefix. + typename TestFixture::List values; + values.push_back(n); + ... +} + +TYPED_TEST(FooTest, HasPropertyA) { ... } + +#endif // 0 + +// Type-parameterized tests are abstract test patterns parameterized +// by a type. Compared with typed tests, type-parameterized tests +// allow you to define the test pattern without knowing what the type +// parameters are. The defined pattern can be instantiated with +// different types any number of times, in any number of translation +// units. +// +// If you are designing an interface or concept, you can define a +// suite of type-parameterized tests to verify properties that any +// valid implementation of the interface/concept should have. Then, +// each implementation can easily instantiate the test suite to verify +// that it conforms to the requirements, without having to write +// similar tests repeatedly. Here's an example: + +#if 0 + +// First, define a fixture class template. 
It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + ... +}; + +// Next, declare that you will define a type-parameterized test case +// (the _P suffix is for "parameterized" or "pattern", whichever you +// prefer): +TYPED_TEST_CASE_P(FooTest); + +// Then, use TYPED_TEST_P() to define as many type-parameterized tests +// for this type-parameterized test case as you want. +TYPED_TEST_P(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + TypeParam n = 0; + ... +} + +TYPED_TEST_P(FooTest, HasPropertyA) { ... } + +// Now the tricky part: you need to register all test patterns before +// you can instantiate them. The first argument of the macro is the +// test case name; the rest are the names of the tests in this test +// case. +REGISTER_TYPED_TEST_CASE_P(FooTest, + DoesBlah, HasPropertyA); + +// Finally, you are free to instantiate the pattern with the types you +// want. If you put the above code in a header file, you can #include +// it in multiple C++ source files and instantiate it multiple times. +// +// To distinguish different instances of the pattern, the first +// argument to the INSTANTIATE_* macro is a prefix that will be added +// to the actual test case name. Remember to pick unique prefixes for +// different instances. +typedef testing::Types MyTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int); + +#endif // 0 + + +// Implements typed tests. + +#if GTEST_HAS_TYPED_TEST + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the typedef for the type parameters of the +// given test case. +# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_ + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define TYPED_TEST_CASE(CaseName, Types) \ + typedef ::testing::internal::TypeList< Types >::type \ + GTEST_TYPE_PARAMS_(CaseName) + +# define TYPED_TEST(CaseName, TestName) \ + template \ + class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ + : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTest< \ + CaseName, \ + ::testing::internal::TemplateSel< \ + GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \ + GTEST_TYPE_PARAMS_(CaseName)>::Register(\ + "", #CaseName, #TestName, 0); \ + template \ + void GTEST_TEST_CLASS_NAME_(CaseName, TestName)::TestBody() + +#endif // GTEST_HAS_TYPED_TEST + +// Implements type-parameterized tests. + +#if GTEST_HAS_TYPED_TEST_P + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the namespace name that the type-parameterized tests for +// the given type-parameterized test case are defined in. The exact +// name of the namespace is subject to change without notice. +# define GTEST_CASE_NAMESPACE_(TestCaseName) \ + gtest_case_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the variable used to remember the names of +// the defined tests in the given test case. 
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \ + gtest_typed_test_case_p_state_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY. +// +// Expands to the name of the variable used to remember the names of +// the registered tests in the given test case. +# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \ + gtest_registered_test_names_##TestCaseName##_ + +// The variables defined in the type-parameterized test macros are +// static as typically these macros are used in a .h file that can be +// #included in multiple translation units linked together. +# define TYPED_TEST_CASE_P(CaseName) \ + static ::testing::internal::TypedTestCasePState \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName) + +# define TYPED_TEST_P(CaseName, TestName) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + template \ + class TestName : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\ + __FILE__, __LINE__, #CaseName, #TestName); \ + } \ + template \ + void GTEST_CASE_NAMESPACE_(CaseName)::TestName::TestBody() + +# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \ + } \ + static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\ + __FILE__, __LINE__, #__VA_ARGS__) + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \ + bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTestCase::type>::Register(\ + #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName)) + +#endif // GTEST_HAS_TYPED_TEST_P + +#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// Depending on the platform, different string classes are available. +// On Linux, in addition to ::std::string, Google also makes use of +// class ::string, which has the same interface as ::std::string, but +// has a different implementation. +// +// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that +// ::string is available AND is a distinct type to ::std::string, or +// define it to 0 to indicate otherwise. +// +// If the user's ::std::string and ::string are the same class due to +// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0. +// +// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined +// heuristically. + +namespace testing { + +// Declares the flags. + +// This flag temporary enables the disabled tests. +GTEST_DECLARE_bool_(also_run_disabled_tests); + +// This flag brings the debugger on an assertion failure. +GTEST_DECLARE_bool_(break_on_failure); + +// This flag controls whether Google Test catches all test-thrown exceptions +// and logs them as failures. +GTEST_DECLARE_bool_(catch_exceptions); + +// This flag enables using colors in terminal output. Available values are +// "yes" to enable colors, "no" (disable colors), or "auto" (the default) +// to let Google Test decide. +GTEST_DECLARE_string_(color); + +// This flag sets up the filter to select by name using a glob pattern +// the tests to run. 
If the filter is not given all tests are executed. +GTEST_DECLARE_string_(filter); + +// This flag causes the Google Test to list tests. None of the tests listed +// are actually run if the flag is provided. +GTEST_DECLARE_bool_(list_tests); + +// This flag controls whether Google Test emits a detailed XML report to a file +// in addition to its normal textual output. +GTEST_DECLARE_string_(output); + +// This flags control whether Google Test prints the elapsed time for each +// test. +GTEST_DECLARE_bool_(print_time); + +// This flag specifies the random number seed. +GTEST_DECLARE_int32_(random_seed); + +// This flag sets how many times the tests are repeated. The default value +// is 1. If the value is -1 the tests are repeating forever. +GTEST_DECLARE_int32_(repeat); + +// This flag controls whether Google Test includes Google Test internal +// stack frames in failure stack traces. +GTEST_DECLARE_bool_(show_internal_stack_frames); + +// When this flag is specified, tests' order is randomized on every iteration. +GTEST_DECLARE_bool_(shuffle); + +// This flag specifies the maximum number of stack frames to be +// printed in a failure message. +GTEST_DECLARE_int32_(stack_trace_depth); + +// When this flag is specified, a failed assertion will throw an +// exception if exceptions are enabled, or exit the program with a +// non-zero code otherwise. +GTEST_DECLARE_bool_(throw_on_failure); + +// When this flag is set with a "host:port" string, on supported +// platforms test results are streamed to the specified port on +// the specified host machine. +GTEST_DECLARE_string_(stream_result_to); + +// The upper limit for valid stack trace depths. +const int kMaxStackTraceDepth = 100; + +namespace internal { + +class AssertHelper; +class DefaultGlobalTestPartResultReporter; +class ExecDeathTest; +class NoExecDeathTest; +class FinalSuccessChecker; +class GTestFlagSaver; +class TestResultAccessor; +class TestEventListenersAccessor; +class TestEventRepeater; +class WindowsDeathTest; +class UnitTestImpl* GetUnitTestImpl(); +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const String& message); + +// Converts a streamable value to a String. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +// Declared in gtest-internal.h but defined here, so that it has access +// to the definition of the Message class, required by the ARM +// compiler. +template +String StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal + +// The friend relationship of some of these classes is cyclic. +// If we don't forward declare them the compiler might confuse the classes +// in friendship clauses with same named classes on the scope. +class Test; +class TestCase; +class TestInfo; +class UnitTest; + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. 
Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + // Used in the EXPECT_TRUE/FALSE(bool_expression). + explicit AssertionResult(bool success) : success_(success) {} + + // Returns true iff the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != NULL ? message_->c_str() : ""; + } + // TODO(vladl@google.com): Remove this after making sure no clients use it. + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. 
+ AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == NULL) + message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + internal::scoped_ptr< ::std::string> message_; + + GTEST_DISALLOW_ASSIGN_(AssertionResult); +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestCases, and +// each TestCase contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { ... } +// virtual void TearDown() { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. +class GTEST_API_ Test { + public: + friend class TestInfo; + + // Defines types for pointers to functions that set up and tear down + // a test case. + typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc; + typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. + // + // Google Test will call Foo::SetUpTestCase() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestCase() method to shadow the one defined in the super + // class. + static void SetUpTestCase() {} + + // Tears down the stuff shared by all tests in this test case. + // + // Google Test will call Foo::TearDownTestCase() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestCase() method to shadow the one defined in the super + // class. + static void TearDownTestCase() {} + + // Returns true iff the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true iff the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true iff the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test. Only the last value for a given + // key is remembered. + // These are public static so they can be called from utility functions + // that are not members of the test fixture. 
+ // The arguments are const char* instead strings, as Google Test is used + // on platforms where string doesn't compile. + // + // Note that a driving consideration for these RecordProperty methods + // was to produce xml output suited to the Greenspan charting utility, + // which at present will only chart values that fit in a 32-bit int. It + // is the user's responsibility to restrict their values to 32-bit ints + // if they intend them to be used with Greenspan. + static void RecordProperty(const char* key, const char* value); + static void RecordProperty(const char* key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true iff the current test has the same fixture class as + // the first test in the current test case. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + // Uses a GTestFlagSaver to save and restore all Google Test flags. + const internal::GTestFlagSaver* const gtest_flag_saver_; + + // Often a user mis-spells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if a user declares void Setup() in his test + // fixture. + // + // - This method is private, so it will be another compiler error + // if a user calls it from his test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. + // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const char* a_key, const char* a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const char* new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + internal::String key_; + // The value supplied by the user. + internal::String value_; +}; + +// The result of a single Test. 
This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true iff the test passed (i.e. no test part failed). + bool Passed() const { return !Failed(); } + + // Returns true iff the test failed. + bool Failed() const; + + // Returns true iff the test fatally failed. + bool HasFatalFailure() const; + + // Returns true iff the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test part result among all the results. i can range + // from 0 to test_property_count() - 1. If i is not in that range, aborts + // the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the elapsed time. + void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. + void RecordProperty(const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testcase tags. Returns true if the property is valid. + // TODO(russr): Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. + void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. 
+ internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test case name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test case name. + const char* test_case_name() const { return test_case_name_.c_str(); } + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != NULL) + return value_param_->c_str(); + return NULL; + } + + // Returns true if this test should run, that is if the test is not disabled + // (or it is disabled but the also_run_disabled_tests flag has been specified) + // and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test case Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. + // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. + // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns the result of the test. + const TestResult* result() const { return &result_; } + + private: + +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestCase; + friend class internal::UnitTestImpl; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_case_name, const char* name, + const char* type_param, + const char* value_param, + internal::TypeId fixture_class_id, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const char* test_case_name, const char* name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. 
+ int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. + const std::string test_case_name_; // Test case name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const internal::scoped_ptr value_param_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True iff this test should run + bool is_disabled_; // True iff this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test case, which consists of a vector of TestInfos. +// +// TestCase is not copyable. +class GTEST_API_ TestCase { + public: + // Creates a TestCase with the given name. + // + // TestCase does NOT have a default constructor. Always use this + // constructor to create a TestCase object. + // + // Arguments: + // + // name: name of the test case + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase(const char* name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Destructor of TestCase. + virtual ~TestCase(); + + // Gets the name of the TestCase. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test case. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns true if any test in this test case should run. + bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test case. + int successful_test_count() const; + + // Gets the number of failed tests in this test case. + int failed_test_count() const; + + // Gets the number of disabled tests in this test case. + int disabled_test_count() const; + + // Get the number of tests in this test case that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test case. + int total_test_count() const; + + // Returns true iff the test case passed. + bool Passed() const { return !Failed(); } + + // Returns true iff the test case failed. + bool Failed() const { return failed_test_count() > 0; } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. 
+ const TestInfo* GetTestInfo(int i) const; + + private: + friend class Test; + friend class internal::UnitTestImpl; + + // Gets the (mutable) vector of TestInfos in this TestCase. + std::vector& test_info_list() { return test_info_list_; } + + // Gets the (immutable) vector of TestInfos in this TestCase. + const std::vector& test_info_list() const { + return test_info_list_; + } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + TestInfo* GetMutableTestInfo(int i); + + // Sets the should_run member. + void set_should_run(bool should) { should_run_ = should; } + + // Adds a TestInfo to this test case. Will delete the TestInfo upon + // destruction of the TestCase object. + void AddTestInfo(TestInfo * test_info); + + // Clears the results of all tests in this test case. + void ClearResult(); + + // Clears the results of all tests in the given test case. + static void ClearTestCaseResult(TestCase* test_case) { + test_case->ClearResult(); + } + + // Runs every test in this TestCase. + void Run(); + + // Runs SetUpTestCase() for this TestCase. This wrapper is needed + // for catching exceptions thrown from SetUpTestCase(). + void RunSetUpTestCase() { (*set_up_tc_)(); } + + // Runs TearDownTestCase() for this TestCase. This wrapper is + // needed for catching exceptions thrown from TearDownTestCase(). + void RunTearDownTestCase() { (*tear_down_tc_)(); } + + // Returns true iff test passed. + static bool TestPassed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Passed(); + } + + // Returns true iff test failed. + static bool TestFailed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Failed(); + } + + // Returns true iff test is disabled. + static bool TestDisabled(const TestInfo* test_info) { + return test_info->is_disabled_; + } + + // Returns true if the given test should run. + static bool ShouldRunTest(const TestInfo* test_info) { + return test_info->should_run(); + } + + // Shuffles the tests in this test case. + void ShuffleTests(internal::Random* random); + + // Restores the test order to before the first shuffle. + void UnshuffleTests(); + + // Name of the test case. + internal::String name_; + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // The vector of TestInfos in their original order. It owns the + // elements in the vector. + std::vector test_info_list_; + // Provides a level of indirection for the test list to allow easy + // shuffling and restoring the test order. The i-th element in this + // vector is the index of the i-th test in the shuffled test list. + std::vector test_indices_; + // Pointer to the function that sets up the test case. + Test::SetUpTestCaseFunc set_up_tc_; + // Pointer to the function that tears down the test case. + Test::TearDownTestCaseFunc tear_down_tc_; + // True iff any test in this test case should run. + bool should_run_; + // Elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestCases. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase); +}; + +// An Environment object is capable of setting up and tearing down an +// environment. The user should subclass this to define his own +// environment(s). 
+// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. + virtual void SetUp() {} + + // Override this to define how to tear down the environment. + virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } +}; + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. 
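+// For example, a listener that only reports failed assertions can subclass
+// EmptyTestEventListener (below), override the single event it cares about,
+// and be appended to the UnitTest listener list. This is a minimal usage
+// sketch; the name FailurePrinter is hypothetical and not part of this
+// header:
+//
+//   class FailurePrinter : public testing::EmptyTestEventListener {
+//     virtual void OnTestPartResult(const testing::TestPartResult& result) {
+//       if (result.failed())
+//         printf("FAILURE: %s\n", result.summary());
+//     }
+//   };
+//
+//   // In main(), before RUN_ALL_TESTS(); Google Test takes ownership of
+//   // the appended listener and deletes it when the program finishes.
+//   testing::UnitTest::GetInstance()->listeners().Append(new FailurePrinter);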
+class EmptyTestEventListener : public TestEventListener { + public: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& /*test_case*/) {} + virtual void OnTestStart(const TestInfo& /*test_info*/) {} + virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {} + virtual void OnTestEnd(const TestInfo& /*test_info*/) {} + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {} + virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. Can be removed from the listeners list to shut down default + // console output. Note that removing this object from the listener list + // with Release transfers its ownership to the caller and makes this + // function return NULL the next time. + TestEventListener* default_result_printer() const { + return default_result_printer_; + } + + // Returns the standard listener responsible for the default XML output + // controlled by the --gtest_output=xml flag. Can be removed from the + // listeners list by users who want to shut down the default XML output + // controlled by this flag and substitute it with custom one. Note that + // removing this object from the listener list with Release transfers its + // ownership to the caller and makes this function return NULL the next + // time. + TestEventListener* default_xml_generator() const { + return default_xml_generator_; + } + + private: + friend class TestCase; + friend class TestInfo; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::NoExecDeathTest; + friend class internal::TestEventListenersAccessor; + friend class internal::UnitTestImpl; + + // Returns repeater that broadcasts the TestEventListener events to all + // subscribers. + TestEventListener* repeater(); + + // Sets the default_result_printer attribute to the provided listener. + // The listener is also added to the listener list and previous + // default_result_printer is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultResultPrinter(TestEventListener* listener); + + // Sets the default_xml_generator attribute to the provided listener. 
The + // listener is also added to the listener list and previous + // default_xml_generator is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultXmlGenerator(TestEventListener* listener); + + // Controls whether events will be forwarded by the repeater to the + // listeners in the list. + bool EventForwardingEnabled() const; + void SuppressEventForwarding(); + + // The actual list of listeners. + internal::TestEventRepeater* repeater_; + // Listener responsible for the standard result output. + TestEventListener* default_result_printer_; + // Listener responsible for the creation of the XML output file. + TestEventListener* default_xml_generator_; + + // We disallow copying TestEventListeners. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); +}; + +// A UnitTest consists of a vector of TestCases. +// +// This is a singleton class. The only instance of UnitTest is +// created when UnitTest::GetInstance() is first called. This +// instance is never deleted. +// +// UnitTest is not copyable. +// +// This class is thread-safe as long as the methods are called +// according to their specification. +class GTEST_API_ UnitTest { + public: + // Gets the singleton UnitTest object. The first time this method + // is called, a UnitTest object is constructed and returned. + // Consecutive calls will return the same object. + static UnitTest* GetInstance(); + + // Runs all tests in this UnitTest object and prints the result. + // Returns 0 if successful, or 1 otherwise. + // + // This method can only be called from the main thread. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestCase object for the test that's currently running, + // or NULL if no test is running. + const TestCase* current_test_case() const; + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const; + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + +#if GTEST_HAS_PARAM_TEST + // Returns the ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry(); +#endif // GTEST_HAS_PARAM_TEST + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the elapsed time, in milliseconds. 
+ TimeInMillis elapsed_time() const; + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const; + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. + Environment* AddEnvironment(Environment* env); + + // Adds a TestPartResult to the current TestResult object. All + // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) + // eventually call this to report their results. The user code + // should use the assertion macros instead of calling this directly. + void AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const internal::String& message, + const internal::String& os_stack_trace); + + // Adds a TestProperty to the current TestResult object. If the result already + // contains a property with the same key, the value will be updated. + void RecordPropertyForCurrentTest(const char* key, const char* value); + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i); + + // Accessors for the implementation object. + internal::UnitTestImpl* impl() { return impl_; } + const internal::UnitTestImpl* impl() const { return impl_; } + + // These classes and funcions are friends as they need to access private + // members of UnitTest. + friend class Test; + friend class internal::AssertHelper; + friend class internal::ScopedTrace; + friend Environment* AddGlobalTestEnvironment(Environment* env); + friend internal::UnitTestImpl* internal::GetUnitTestImpl(); + friend void internal::ReportFailureInUnknownLocation( + TestPartResult::Type result_type, + const internal::String& message); + + // Creates an empty UnitTest. + UnitTest(); + + // D'tor + virtual ~UnitTest(); + + // Pushes a trace defined by SCOPED_TRACE() on to the per-thread + // Google Test trace stack. + void PushGTestTrace(const internal::TraceInfo& trace); + + // Pops a trace from the per-thread Google Test trace stack. + void PopGTestTrace(); + + // Protects mutable state in *impl_. This is mutable as some const + // methods need to lock it too. + mutable internal::Mutex mutex_; + + // Opaque implementation object. This field is never changed once + // the object is constructed. We don't mark it as const here, as + // doing so will cause a warning in the constructor of UnitTest. + // Mutable state in *impl_ is protected by mutex_. + internal::UnitTestImpl* impl_; + + // We disallow copying UnitTest. + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); +}; + +// A convenient wrapper for adding an environment for the test +// program. 
+// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. For example, you can define a global +// variable like this: +// +// testing::Environment* const foo_env = +// testing::AddGlobalTestEnvironment(new FooEnvironment); +// +// However, we strongly recommend you to write your own main() and +// call AddGlobalTestEnvironment() there, as relying on initialization +// of global variables makes the code harder to read and may cause +// problems when you register multiple environments from different +// translation units and the environments have dependencies among them +// (remember that the compiler doesn't guarantee the order in which +// global variables from different translation units are initialized). +inline Environment* AddGlobalTestEnvironment(Environment* env) { + return UnitTest::GetInstance()->AddEnvironment(env); +} + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +GTEST_API_ void InitGoogleTest(int* argc, char** argv); + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv); + +namespace internal { + +// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc) +// operand to be used in a failure message. The type (but not value) +// of the other operand may affect the format. This allows us to +// print a char* as a raw pointer when it is compared against another +// char*, and print it as a C string when it is compared against an +// std::string object, for example. +// +// The default implementation ignores the type of the other operand. +// Some specialized versions are used to handle formatting wide or +// narrow C strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +String FormatForComparisonFailureMessage(const T1& value, + const T2& /* other_operand */) { + // C++Builder compiles this incorrectly if the namespace isn't explicitly + // given. + return ::testing::PrintToString(value); +} + +// The helper function for {ASSERT|EXPECT}_EQ. +template +AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual) { +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4389) // Temporarily disables warning on + // signed/unsigned mismatch. +#endif + + if (expected == actual) { + return AssertionSuccess(); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + + return EqFailure(expected_expression, + actual_expression, + FormatForComparisonFailureMessage(expected, actual), + FormatForComparisonFailureMessage(actual, expected), + false); +} + +// With this overloaded version, we allow anonymous enums to be used +// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums +// can be implicitly cast to BiggestInt. 
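A minimal sketch of the hand-written main() recommended in the comments above may help; it only uses the functions declared here (InitGoogleTest, AddGlobalTestEnvironment, RUN_ALL_TESTS). FooEnvironment mirrors the hypothetical name from the comment, and its SetUp()/TearDown() overrides are assumed to be the standard ::testing::Environment hooks.

#include "gtest/gtest.h"

class FooEnvironment : public ::testing::Environment {
 public:
  virtual void SetUp() { /* global set-up, run once before all tests */ }
  virtual void TearDown() { /* global tear-down, run once after all tests */ }
};

int main(int argc, char** argv) {
  // Parse the --gtest_* flags (and strip them from argv) first.
  testing::InitGoogleTest(&argc, argv);
  // The UnitTest singleton takes ownership; register before RUN_ALL_TESTS().
  testing::AddGlobalTestEnvironment(new FooEnvironment);
  return RUN_ALL_TESTS();
}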
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual); + +// The helper class for {ASSERT|EXPECT}_EQ. The template argument +// lhs_is_null_literal is true iff the first argument to ASSERT_EQ() +// is a null pointer literal. The following default implementation is +// for lhs_is_null_literal being false. +template +class EqHelper { + public: + // This templatized version is for the general case. + template + static AssertionResult Compare(const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } + + // With this overloaded version, we allow anonymous enums to be used + // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous + // enums can be implicitly cast to BiggestInt. + // + // Even though its body looks the same as the above version, we + // cannot merge the two, as it will make anonymous enums unhappy. + static AssertionResult Compare(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } +}; + +// This specialization is used when the first argument to ASSERT_EQ() +// is a null pointer literal, like NULL, false, or 0. +template <> +class EqHelper { + public: + // We define two overloaded versions of Compare(). The first + // version will be picked when the second argument to ASSERT_EQ() is + // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or + // EXPECT_EQ(false, a_bool). + template + static AssertionResult Compare( + const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual, + // The following line prevents this overload from being considered if T2 + // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr) + // expands to Compare("", "", NULL, my_ptr), which requires a conversion + // to match the Secret* in the other overload, which would otherwise make + // this template match better. + typename EnableIf::value>::type* = 0) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } + + // This version will be picked when the second argument to ASSERT_EQ() is a + // pointer, e.g. ASSERT_EQ(NULL, a_pointer). + template + static AssertionResult Compare( + const char* expected_expression, + const char* actual_expression, + // We used to have a second template parameter instead of Secret*. That + // template parameter would deduce to 'long', making this a better match + // than the first overload even without the first overload's EnableIf. + // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to + // non-pointer argument" (even a deduced integral argument), so the old + // implementation caused warnings in user code. + Secret* /* expected (NULL) */, + T* actual) { + // We already know that 'expected' is a null pointer. + return CmpHelperEQ(expected_expression, actual_expression, + static_cast(NULL), actual); + } +}; + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste +// of similar code. +// +// For each templatized helper function, we also define an overloaded +// version for BiggestInt in order to reduce code bloat and allow +// anonymous enums to be used with {ASSERT|EXPECT}_?? 
when compiled +// with gcc 4. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +template \ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + const T1& val1, const T2& val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +}\ +GTEST_API_ AssertionResult CmpHelper##op_name(\ + const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2) + +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// Implements the helper function for {ASSERT|EXPECT}_NE +GTEST_IMPL_CMP_HELPER_(NE, !=); +// Implements the helper function for {ASSERT|EXPECT}_LE +GTEST_IMPL_CMP_HELPER_(LE, <=); +// Implements the helper function for {ASSERT|EXPECT}_LT +GTEST_IMPL_CMP_HELPER_(LT, < ); +// Implements the helper function for {ASSERT|EXPECT}_GE +GTEST_IMPL_CMP_HELPER_(GE, >=); +// Implements the helper function for {ASSERT|EXPECT}_GT +GTEST_IMPL_CMP_HELPER_(GT, > ); + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRNE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + + +// Helper function for *_STREQ on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual); + +// Helper function for *_STRNE on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +} // namespace internal + +// IsSubstring() and IsNotSubstring() are intended to be used as the +// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by +// themselves. They check whether needle is a substring of haystack +// (NULL is considered a substring of itself only), and return an +// appropriate error message when they fail. +// +// The {needle,haystack}_expr arguments are the stringified +// expressions that generated the two real arguments. 
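Because IsSubstring() and IsNotSubstring() are predicate-formatters rather than stand-alone assertions, they are meant to be passed as the first argument of the *_PRED_FORMAT2 macros defined later in this header. A small illustrative sketch (the log string and its contents are made up):

// Inside a test body:
std::string log_output = "connection established on port 8080";
EXPECT_PRED_FORMAT2(::testing::IsSubstring, "connection established", log_output);
ASSERT_PRED_FORMAT2(::testing::IsNotSubstring, "fatal error", log_output);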
+GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +// Helper template function for comparing floating-points. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression, + const char* actual_expression, + RawType expected, + RawType actual) { + const FloatingPoint lhs(expected), rhs(actual); + + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + ::std::stringstream expected_ss; + expected_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << expected; + + ::std::stringstream actual_ss; + actual_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << actual; + + return EqFailure(expected_expression, + actual_expression, + StringStreamToString(&expected_ss), + StringStreamToString(&actual_ss), + false); +} + +// Helper function for implementing ASSERT_NEAR. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error); + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// A class that enables one to stream messages to assertion macros +class GTEST_API_ AssertHelper { + public: + // Constructor. + AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message); + ~AssertHelper(); + + // Message assignment is a semantic trick to enable assertion + // streaming; see the GTEST_MESSAGE_ macro below. + void operator=(const Message& message) const; + + private: + // We put our data in a struct so that the size of the AssertHelper class can + // be as small as possible. This is important because gcc is incapable of + // re-using stack space even for temporary variables, so every EXPECT_EQ + // reserves stack space for another AssertHelper. 
+ struct AssertHelperData { + AssertHelperData(TestPartResult::Type t, + const char* srcfile, + int line_num, + const char* msg) + : type(t), file(srcfile), line(line_num), message(msg) { } + + TestPartResult::Type const type; + const char* const file; + int const line; + String const message; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData); + }; + + AssertHelperData* const data_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper); +}; + +} // namespace internal + +#if GTEST_HAS_PARAM_TEST +// The pure interface class that all value-parameterized tests inherit from. +// A value-parameterized class must inherit from both ::testing::Test and +// ::testing::WithParamInterface. In most cases that just means inheriting +// from ::testing::TestWithParam, but more complicated test hierarchies +// may need to inherit from Test and WithParamInterface at different levels. +// +// This interface has support for accessing the test parameter value via +// the GetParam() method. +// +// Use it with one of the parameter generator defining functions, like Range(), +// Values(), ValuesIn(), Bool(), and Combine(). +// +// class FooTest : public ::testing::TestWithParam { +// protected: +// FooTest() { +// // Can use GetParam() here. +// } +// virtual ~FooTest() { +// // Can use GetParam() here. +// } +// virtual void SetUp() { +// // Can use GetParam() here. +// } +// virtual void TearDown { +// // Can use GetParam() here. +// } +// }; +// TEST_P(FooTest, DoesBar) { +// // Can use GetParam() method here. +// Foo foo; +// ASSERT_TRUE(foo.DoesBar(GetParam())); +// } +// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10)); + +template +class WithParamInterface { + public: + typedef T ParamType; + virtual ~WithParamInterface() {} + + // The current parameter value. Is also available in the test fixture's + // constructor. This member function is non-static, even though it only + // references static data, to reduce the opportunity for incorrect uses + // like writing 'WithParamInterface::GetParam()' for a test that + // uses a fixture whose parameter type is int. + const ParamType& GetParam() const { return *parameter_; } + + private: + // Sets parameter value. The caller is responsible for making sure the value + // remains alive and unchanged throughout the current test. + static void SetParam(const ParamType* parameter) { + parameter_ = parameter; + } + + // Static value used for accessing parameter during a test lifetime. + static const ParamType* parameter_; + + // TestClass must be a subclass of WithParamInterface and Test. + template friend class internal::ParameterizedTestFactory; +}; + +template +const T* WithParamInterface::parameter_ = NULL; + +// Most value-parameterized classes can ignore the existence of +// WithParamInterface, and can just inherit from ::testing::TestWithParam. + +template +class TestWithParam : public Test, public WithParamInterface { +}; + +#endif // GTEST_HAS_PARAM_TEST + +// Macros for indicating success/failure in test code. + +// ADD_FAILURE unconditionally adds a failure to the current test. +// SUCCEED generates a success - it doesn't automatically make the +// current test successful, as a test is only successful when it has +// no failure. +// +// EXPECT_* verifies that a certain condition is satisfied. If not, +// it behaves like ADD_FAILURE. In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. 
+// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. +// +// Examples: +// +// EXPECT_TRUE(server.StatusIsOK()); +// ASSERT_FALSE(server.HasPendingRequest(port)) +// << "There are still pending requests " << "on port " << port; + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. + +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 09/24/2010 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. + +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar = (expression)) \ + ; \ + else \ + on_failure(gtest_ar.failure_message()) + + +// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. 
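To make the distinction described above concrete, here is a small sketch of both flavors: a plain bool-returning predicate for {EXPECT|ASSERT}_PRED2, and a predicate-formatter returning testing::AssertionResult for {EXPECT|ASSERT}_PRED_FORMAT2. The Gcd() helper is hypothetical and assumed to be defined elsewhere.

// Plain predicate: returns bool; its arguments must be streamable.
bool MutuallyPrime(int m, int n) { return Gcd(m, n) == 1; }

// Predicate-formatter: also receives the stringified argument expressions
// and returns a testing::AssertionResult carrying a tailored message.
testing::AssertionResult AssertMutuallyPrime(const char* m_expr,
                                             const char* n_expr,
                                             int m, int n) {
  if (Gcd(m, n) == 1) return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << m_expr << " and " << n_expr << " (" << m << " and " << n
      << ") are not mutually prime";
}

// In a test body:
//   EXPECT_PRED2(MutuallyPrime, a, b);
//   EXPECT_PRED_FORMAT2(AssertMutuallyPrime, a, b);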
+template +AssertionResult AssertPred1Helper(const char* pred_text, + const char* e1, + Pred pred, + const T1& v1) { + if (pred(v1)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1. +// Don't use this in your code. +#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, v1),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. +#define GTEST_PRED1_(pred, v1, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \ + #v1, \ + pred, \ + v1), on_failure) + +// Unary predicate assertion macros. +#define EXPECT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +template +AssertionResult AssertPred2Helper(const char* pred_text, + const char* e1, + const char* e2, + Pred pred, + const T1& v1, + const T2& v2) { + if (pred(v1, v2)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2. +// Don't use this in your code. +#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +#define GTEST_PRED2_(pred, v1, v2, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \ + #v1, \ + #v2, \ + pred, \ + v1, \ + v2), on_failure) + +// Binary predicate assertion macros. +#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. +template +AssertionResult AssertPred3Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3) { + if (pred(v1, v2, v3)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3. +// Don't use this in your code. +#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. 
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + pred, \ + v1, \ + v2, \ + v3), on_failure) + +// Ternary predicate assertion macros. +#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \ + GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED3(pred, v1, v2, v3) \ + GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \ + GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED3(pred, v1, v2, v3) \ + GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use +// this in your code. +template +AssertionResult AssertPred4Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + const char* e4, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3, + const T4& v4) { + if (pred(v1, v2, v3, v4)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ", " + << e4 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3 + << "\n" << e4 << " evaluates to " << v4; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4. +// Don't use this in your code. +#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use +// this in your code. +#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4), on_failure) + +// 4-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ + GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED4(pred, v1, v2, v3, v4) \ + GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ + GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED4(pred, v1, v2, v3, v4) \ + GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use +// this in your code. +template +AssertionResult AssertPred5Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + const char* e4, + const char* e5, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3, + const T4& v4, + const T5& v5) { + if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ", " + << e4 << ", " + << e5 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3 + << "\n" << e4 << " evaluates to " << v4 + << "\n" << e5 << " evaluates to " << v5; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5. +// Don't use this in your code. +#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED5. 
Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. 
+// +// Examples: +// +// EXPECT_NE(5, Foo()); +// EXPECT_EQ(NULL, a_pointer); +// ASSERT_LT(i, array_size); +// ASSERT_GT(records.size(), 0) << "There is no record left."; + +#define EXPECT_EQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define EXPECT_NE(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual) +#define EXPECT_LE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define EXPECT_LT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define EXPECT_GE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define EXPECT_GT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +#define GTEST_ASSERT_EQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define GTEST_ASSERT_NE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2) +#define GTEST_ASSERT_LE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define GTEST_ASSERT_LT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define GTEST_ASSERT_GE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define GTEST_ASSERT_GT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of +// ASSERT_XY(), which clashes with some users' own code. + +#if !GTEST_DONT_DEFINE_ASSERT_EQ +# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_NE +# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LE +# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LT +# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GE +# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GT +# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) +#endif + +// C String Comparisons. All tests treat NULL and any non-NULL string +// as different. Two NULLs are equal. +// +// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2 +// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2 +// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case +// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case +// +// For wide or narrow string objects, you can use the +// {ASSERT|EXPECT}_??() macros. +// +// Don't depend on the order in which the arguments are evaluated, +// which is undefined. +// +// These macros evaluate their arguments exactly once. 
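As a concrete illustration of note 2 above (the _EQ family compares C-string pointers, while the _STR* family defined just below compares contents), a short sketch:

const char kHello[] = "hello";
std::string greeting("hello");             // same characters, different address
EXPECT_STREQ(kHello, greeting.c_str());    // passes: contents are equal
// EXPECT_EQ(kHello, greeting.c_str());    // would compare the two addresses and fail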
+ +#define EXPECT_STREQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual) +#define EXPECT_STRNE(s1, s2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define EXPECT_STRCASEEQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual) +#define EXPECT_STRCASENE(s1, s2)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +#define ASSERT_STREQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual) +#define ASSERT_STRNE(s1, s2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define ASSERT_STRCASEEQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual) +#define ASSERT_STRCASENE(s1, s2)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +// Macros for comparing floating-point numbers. +// +// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual): +// Tests that two float values are almost equal. +// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual): +// Tests that two double values are almost equal. +// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error): +// Tests that v1 and v2 are within the given distance to each other. +// +// Google Test uses ULP-based comparison to automatically pick a default +// error bound that is appropriate for the operands. See the +// FloatingPoint template class in gtest-internal.h if you are +// interested in the implementation details. + +#define EXPECT_FLOAT_EQ(expected, actual)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define EXPECT_DOUBLE_EQ(expected, actual)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define ASSERT_FLOAT_EQ(expected, actual)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define ASSERT_DOUBLE_EQ(expected, actual)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define EXPECT_NEAR(val1, val2, abs_error)\ + EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +#define ASSERT_NEAR(val1, val2, abs_error)\ + ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +// These predicate format functions work on floating-point values, and +// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g. +// +// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0); + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2); +GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2); + + +#if GTEST_OS_WINDOWS + +// Macros that test for HRESULT failure and success, these are only useful +// on Windows, and rely on Windows SDK macros and APIs to compile. +// +// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr) +// +// When expr unexpectedly fails or succeeds, Google Test prints the +// expected result and the actual result with both a human-readable +// string representation of the error, if available, as well as the +// hex result code. 
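Before the Windows-only HRESULT helpers that follow, a short sketch of the floating-point assertions documented above (the computed value is made up):

double pi_estimate = 3.14159;                        // hypothetical computed value
EXPECT_DOUBLE_EQ(0.5, 0.25 + 0.25);                  // ULP-based default error bound
EXPECT_NEAR(3.14159265, pi_estimate, 1e-4);          // explicit absolute error bound
EXPECT_PRED_FORMAT2(testing::DoubleLE, pi_estimate, 4.0);  // val1 <= (or almost equal to) val2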
+# define EXPECT_HRESULT_SUCCEEDED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) + +# define ASSERT_HRESULT_SUCCEEDED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr)) + +# define EXPECT_HRESULT_FAILED(expr) \ + EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) + +# define ASSERT_HRESULT_FAILED(expr) \ + ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr)) + +#endif // GTEST_OS_WINDOWS + +// Macros that execute statement and check that it doesn't generate new fatal +// failures in the current thread. +// +// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement); +// +// Examples: +// +// EXPECT_NO_FATAL_FAILURE(Process()); +// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed"; +// +#define ASSERT_NO_FATAL_FAILURE(statement) \ + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_) +#define EXPECT_NO_FATAL_FAILURE(statement) \ + GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_) + +// Causes a trace (including the source file path, the current line +// number, and the given message) to be included in every test failure +// message generated by code in the current scope. The effect is +// undone when the control leaves the current scope. +// +// The message argument can be anything streamable to std::ostream. +// +// In the implementation, we include the current line number as part +// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s +// to appear in the same block - as long as they are on different +// lines. +#define SCOPED_TRACE(message) \ + ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\ + __FILE__, __LINE__, ::testing::Message() << (message)) + +// Compile-time assertion for type equality. +// StaticAssertTypeEq() compiles iff type1 and type2 are +// the same type. The value it returns is not interesting. +// +// Instead of making StaticAssertTypeEq a class template, we make it a +// function template that invokes a helper class template. This +// prevents a user from misusing StaticAssertTypeEq by +// defining objects of that type. +// +// CAVEAT: +// +// When used inside a method of a class template, +// StaticAssertTypeEq() is effective ONLY IF the method is +// instantiated. For example, given: +// +// template class Foo { +// public: +// void Bar() { testing::StaticAssertTypeEq(); } +// }; +// +// the code: +// +// void Test1() { Foo foo; } +// +// will NOT generate a compiler error, as Foo::Bar() is never +// actually instantiated. Instead, you need: +// +// void Test2() { Foo foo; foo.Bar(); } +// +// to cause a compiler error. +template +bool StaticAssertTypeEq() { + (void)internal::StaticAssertTypeEqHelper(); + return true; +} + +// Defines a test. +// +// The first parameter is the name of the test case, and the second +// parameter is the name of the test within the test case. +// +// The convention is to end the test case name with "Test". For +// example, a test case for the Foo class can be named FooTest. +// +// The user should put his test code between braces after using this +// macro. Example: +// +// TEST(FooTest, InitializesCorrectly) { +// Foo foo; +// EXPECT_TRUE(foo.StatusIsOK()); +// } + +// Note that we call GetTestTypeId() instead of GetTypeId< +// ::testing::Test>() here to get the type ID of testing::Test. This +// is to work around a suspected linker bug when using Google Test as +// a framework on Mac OS X. 
The bug causes GetTypeId< +// ::testing::Test>() to return different values depending on whether +// the call is from the Google Test framework itself or from user test +// code. GetTestTypeId() is guaranteed to always return the same +// value, as it always calls GetTypeId<>() from the Google Test +// framework. +#define GTEST_TEST(test_case_name, test_name)\ + GTEST_TEST_(test_case_name, test_name, \ + ::testing::Test, ::testing::internal::GetTestTypeId()) + +// Define this macro to 1 to omit the definition of TEST(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_TEST +# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name) +#endif + +// Defines a test that uses a test fixture. +// +// The first parameter is the name of the test fixture class, which +// also doubles as the test case name. The second parameter is the +// name of the test within the test case. +// +// A test fixture class must be declared earlier. The user should put +// his test code between braces after using this macro. Example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { b_.AddElement(3); } +// +// Foo a_; +// Foo b_; +// }; +// +// TEST_F(FooTest, InitializesCorrectly) { +// EXPECT_TRUE(a_.StatusIsOK()); +// } +// +// TEST_F(FooTest, ReturnsElementCountCorrectly) { +// EXPECT_EQ(0, a_.size()); +// EXPECT_EQ(1, b_.size()); +// } + +#define TEST_F(test_fixture, test_name)\ + GTEST_TEST_(test_fixture, test_name, test_fixture, \ + ::testing::internal::GetTypeId()) + +// Use this macro in main() to run all tests. It returns 0 if all +// tests are successful, or 1 otherwise. +// +// RUN_ALL_TESTS() should be invoked after the command line has been +// parsed by InitGoogleTest(). + +#define RUN_ALL_TESTS()\ + (::testing::UnitTest::GetInstance()->Run()) + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ diff --git a/modules/dnns_easily_fooled/caffe/src/gtest/gtest_main.cc b/modules/dnns_easily_fooled/caffe/src/gtest/gtest_main.cc new file mode 100644 index 000000000..a09bbe0c6 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/src/gtest/gtest_main.cc @@ -0,0 +1,39 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "gtest/gtest.h" + +GTEST_API_ int main(int argc, char **argv) { + std::cout << "Running main() from gtest_main.cc\n"; + + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/modules/dnns_easily_fooled/caffe/tools/compute_image_mean.cpp b/modules/dnns_easily_fooled/caffe/tools/compute_image_mean.cpp new file mode 100644 index 000000000..5b212b66e --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/compute_image_mean.cpp @@ -0,0 +1,171 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include +#include + +#include +#include + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +using caffe::Datum; +using caffe::BlobProto; +using std::max; + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc < 3 || argc > 4) { + LOG(ERROR) << "Usage: compute_image_mean input_leveldb output_file" + << " db_backend[leveldb or lmdb]"; + return 1; + } + + string db_backend = "leveldb"; + if (argc == 4) { + db_backend = string(argv[3]); + } + + // leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = false; + leveldb::Iterator* it; + // lmdb + MDB_env* mdb_env; + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_value; + MDB_txn* mdb_txn; + MDB_cursor* mdb_cursor; + + // Open db + if (db_backend == "leveldb") { // leveldb + LOG(INFO) << "Opening leveldb " << argv[1]; + leveldb::Status status = leveldb::DB::Open( + options, argv[1], &db); + CHECK(status.ok()) << "Failed to open leveldb " << argv[1]; + leveldb::ReadOptions read_options; + read_options.fill_cache = false; + it = db->NewIterator(read_options); + it->SeekToFirst(); + } else if (db_backend == "lmdb") { // lmdb + LOG(INFO) << "Opening lmdb " << argv[1]; + CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed"; + CHECK_EQ(mdb_env_set_mapsize(mdb_env, 1099511627776), MDB_SUCCESS); // 1TB + CHECK_EQ(mdb_env_open(mdb_env, argv[1], MDB_RDONLY, 0664), + MDB_SUCCESS) << "mdb_env_open failed"; + CHECK_EQ(mdb_txn_begin(mdb_env, NULL, MDB_RDONLY, &mdb_txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS) + << "mdb_open failed"; + CHECK_EQ(mdb_cursor_open(mdb_txn, mdb_dbi, &mdb_cursor), MDB_SUCCESS) + << "mdb_cursor_open failed"; + CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST), + MDB_SUCCESS); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + Datum datum; + BlobProto sum_blob; + int count = 0; + // load first datum + if (db_backend == "leveldb") { + datum.ParseFromString(it->value().ToString()); + } else if (db_backend == "lmdb") { + datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + sum_blob.set_num(1); + sum_blob.set_channels(datum.channels()); + sum_blob.set_height(datum.height()); + sum_blob.set_width(datum.width()); + const int data_size = datum.channels() * datum.height() * 
datum.width(); + int size_in_datum = std::max(datum.data().size(), + datum.float_data_size()); + for (int i = 0; i < size_in_datum; ++i) { + sum_blob.add_data(0.); + } + LOG(INFO) << "Starting Iteration"; + if (db_backend == "leveldb") { // leveldb + for (it->SeekToFirst(); it->Valid(); it->Next()) { + // just a dummy operation + datum.ParseFromString(it->value().ToString()); + const string& data = datum.data(); + size_in_datum = std::max(datum.data().size(), + datum.float_data_size()); + CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " << + size_in_datum; + if (data.size() != 0) { + for (int i = 0; i < size_in_datum; ++i) { + sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]); + } + } else { + for (int i = 0; i < size_in_datum; ++i) { + sum_blob.set_data(i, sum_blob.data(i) + + static_cast(datum.float_data(i))); + } + } + ++count; + if (count % 10000 == 0) { + LOG(ERROR) << "Processed " << count << " files."; + } + } + } else if (db_backend == "lmdb") { // lmdb + CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST), + MDB_SUCCESS); + do { + // just a dummy operation + datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size); + const string& data = datum.data(); + size_in_datum = std::max(datum.data().size(), + datum.float_data_size()); + CHECK_EQ(size_in_datum, data_size) << "Incorrect data field size " << + size_in_datum; + if (data.size() != 0) { + for (int i = 0; i < size_in_datum; ++i) { + sum_blob.set_data(i, sum_blob.data(i) + (uint8_t)data[i]); + } + } else { + for (int i = 0; i < size_in_datum; ++i) { + sum_blob.set_data(i, sum_blob.data(i) + + static_cast(datum.float_data(i))); + } + } + ++count; + if (count % 10000 == 0) { + LOG(ERROR) << "Processed " << count << " files."; + } + } while (mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_NEXT) + == MDB_SUCCESS); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + if (count % 10000 != 0) { + LOG(ERROR) << "Processed " << count << " files."; + } + for (int i = 0; i < sum_blob.data_size(); ++i) { + sum_blob.set_data(i, sum_blob.data(i) / count); + } + // Write to disk + LOG(INFO) << "Write to " << argv[2]; + WriteProtoToBinaryFile(sum_blob, argv[2]); + + // Clean up + if (db_backend == "leveldb") { + delete db; + } else if (db_backend == "lmdb") { + mdb_cursor_close(mdb_cursor); + mdb_close(mdb_env, mdb_dbi); + mdb_txn_abort(mdb_txn); + mdb_env_close(mdb_env); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp b/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp new file mode 100644 index 000000000..aa9515514 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp @@ -0,0 +1,198 @@ +// Copyright 2014 BVLC and contributors. +// This program converts a set of images to a leveldb by storing them as Datum +// proto buffers. +// Usage: +// convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME RANDOM_SHUFFLE[0 or 1] \ +// [resize_height] [resize_width] +// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE +// should be a list of files as well as their labels, in the format as +// subfolder1/file1.JPEG 7 +// .... +// if RANDOM_SHUFFLE is 1, a random shuffle will be carried out before we +// process the file lines. +// Optional flag -g indicates the images should be read as +// single-channel grayscale. If omitted, grayscale images will be +// converted to color. 
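For orientation, a hypothetical end-to-end invocation of the two tools above, following their usage strings (all paths and file names are made up): convert_imageset builds the image database, and compute_image_mean then derives the mean image from it.

# Build a shuffled leveldb of 256x256 color images from a "filename label" list:
convert_imageset /data/imagenet/train/ train.txt imagenet_train_leveldb 1 leveldb 256 256
# Compute the per-pixel mean over that database:
compute_image_mean imagenet_train_leveldb imagenet_mean.binaryproto leveldb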
+ +#include +#include +#include +#include +#include + +#include +#include // NOLINT(readability/streams) +#include +#include +#include + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +using namespace caffe; // NOLINT(build/namespaces) +using std::pair; +using std::string; + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc < 4 || argc > 9) { + printf("Convert a set of images to the leveldb format used\n" + "as input for Caffe.\n" + "Usage:\n" + " convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME" + " RANDOM_SHUFFLE_DATA[0 or 1] DB_BACKEND[leveldb or lmdb]" + " [resize_height] [resize_width]\n" + "The ImageNet dataset for the training demo is at\n" + " http://www.image-net.org/download-images\n"); + return 1; + } + + // Test whether argv[1] == "-g" + bool is_color= !(string("-g") == string(argv[1])); + int arg_offset = (is_color ? 0 : 1); + std::ifstream infile(argv[arg_offset+2]); + std::vector > lines; + string filename; + int label; + while (infile >> filename >> label) { + lines.push_back(std::make_pair(filename, label)); + } + if (argc >= (arg_offset+5) && argv[arg_offset+4][0] == '1') { + // randomly shuffle data + LOG(INFO) << "Shuffling data"; + std::random_shuffle(lines.begin(), lines.end()); + } + LOG(INFO) << "A total of " << lines.size() << " images."; + + string db_backend = "leveldb"; + if (argc >= (arg_offset+6)) { + db_backend = string(argv[arg_offset+5]); + if (!(db_backend == "leveldb") && !(db_backend == "lmdb")) { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + } + + int resize_height = 0; + int resize_width = 0; + if (argc >= (arg_offset+7)) { + resize_height = atoi(argv[arg_offset+6]); + } + if (argc >= (arg_offset+8)) { + resize_width = atoi(argv[arg_offset+7]); + } + + // Open new db + // lmdb + MDB_env *mdb_env; + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_data; + MDB_txn *mdb_txn; + // leveldb + leveldb::DB* db; + leveldb::Options options; + options.error_if_exists = true; + options.create_if_missing = true; + options.write_buffer_size = 268435456; + leveldb::WriteBatch* batch; + + // Open db + if (db_backend == "leveldb") { // leveldb + LOG(INFO) << "Opening leveldb " << argv[arg_offset+3]; + leveldb::Status status = leveldb::DB::Open( + options, argv[arg_offset+3], &db); + CHECK(status.ok()) << "Failed to open leveldb " << argv[arg_offset+3]; + batch = new leveldb::WriteBatch(); + } else if (db_backend == "lmdb") { // lmdb + LOG(INFO) << "Opening lmdb " << argv[arg_offset+3]; + CHECK_EQ(mkdir(argv[arg_offset+3], 0744), 0) + << "mkdir " << argv[arg_offset+3] << "failed"; + CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed"; + CHECK_EQ(mdb_env_set_mapsize(mdb_env, 1099511627776), MDB_SUCCESS) // 1TB + << "mdb_env_set_mapsize failed"; + CHECK_EQ(mdb_env_open(mdb_env, argv[3], 0, 0664), MDB_SUCCESS) + << "mdb_env_open failed"; + CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS) + << "mdb_open failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + // Storing to db + string root_folder(argv[arg_offset+1]); + Datum datum; + int count = 0; + const int kMaxKeyLength = 256; + char key_cstr[kMaxKeyLength]; + int data_size; + bool data_size_initialized = false; + + for (int line_id = 0; line_id < lines.size(); ++line_id) { + if (!ReadImageToDatum(root_folder + lines[line_id].first, + lines[line_id].second, resize_height, resize_width, is_color, &datum)) { + 
continue; + } + if (!data_size_initialized) { + data_size = datum.channels() * datum.height() * datum.width(); + data_size_initialized = true; + } else { + const string& data = datum.data(); + CHECK_EQ(data.size(), data_size) << "Incorrect data field size " + << data.size(); + } + // sequential + snprintf(key_cstr, kMaxKeyLength, "%08d_%s", line_id, + lines[line_id].first.c_str()); + string value; + datum.SerializeToString(&value); + string keystr(key_cstr); + + // Put in db + if (db_backend == "leveldb") { // leveldb + batch->Put(keystr, value); + } else if (db_backend == "lmdb") { // lmdb + mdb_data.mv_size = value.size(); + mdb_data.mv_data = reinterpret_cast(&value[0]); + mdb_key.mv_size = keystr.size(); + mdb_key.mv_data = reinterpret_cast(&keystr[0]); + CHECK_EQ(mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0), MDB_SUCCESS) + << "mdb_put failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + if (++count % 1000 == 0) { + // Commit txn + if (db_backend == "leveldb") { // leveldb + db->Write(leveldb::WriteOptions(), batch); + delete batch; + batch = new leveldb::WriteBatch(); + } else if (db_backend == "lmdb") { // lmdb + CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) + << "mdb_txn_commit failed"; + CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + LOG(ERROR) << "Processed " << count << " files."; + } + } + // write the last batch + if (count % 1000 != 0) { + if (db_backend == "leveldb") { // leveldb + db->Write(leveldb::WriteOptions(), batch); + delete batch; + delete db; + } else if (db_backend == "lmdb") { // lmdb + CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; + mdb_close(mdb_env, mdb_dbi); + mdb_env_close(mdb_env); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + LOG(ERROR) << "Processed " << count << " files."; + } + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp.backup b/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp.backup new file mode 100644 index 000000000..aa9515514 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/convert_imageset.cpp.backup @@ -0,0 +1,198 @@ +// Copyright 2014 BVLC and contributors. +// This program converts a set of images to a leveldb by storing them as Datum +// proto buffers. +// Usage: +// convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME RANDOM_SHUFFLE[0 or 1] \ +// [resize_height] [resize_width] +// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE +// should be a list of files as well as their labels, in the format as +// subfolder1/file1.JPEG 7 +// .... +// if RANDOM_SHUFFLE is 1, a random shuffle will be carried out before we +// process the file lines. +// Optional flag -g indicates the images should be read as +// single-channel grayscale. If omitted, grayscale images will be +// converted to color. 
+ +#include +#include +#include +#include +#include + +#include +#include // NOLINT(readability/streams) +#include +#include +#include + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +using namespace caffe; // NOLINT(build/namespaces) +using std::pair; +using std::string; + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc < 4 || argc > 9) { + printf("Convert a set of images to the leveldb format used\n" + "as input for Caffe.\n" + "Usage:\n" + " convert_imageset [-g] ROOTFOLDER/ LISTFILE DB_NAME" + " RANDOM_SHUFFLE_DATA[0 or 1] DB_BACKEND[leveldb or lmdb]" + " [resize_height] [resize_width]\n" + "The ImageNet dataset for the training demo is at\n" + " http://www.image-net.org/download-images\n"); + return 1; + } + + // Test whether argv[1] == "-g" + bool is_color= !(string("-g") == string(argv[1])); + int arg_offset = (is_color ? 0 : 1); + std::ifstream infile(argv[arg_offset+2]); + std::vector > lines; + string filename; + int label; + while (infile >> filename >> label) { + lines.push_back(std::make_pair(filename, label)); + } + if (argc >= (arg_offset+5) && argv[arg_offset+4][0] == '1') { + // randomly shuffle data + LOG(INFO) << "Shuffling data"; + std::random_shuffle(lines.begin(), lines.end()); + } + LOG(INFO) << "A total of " << lines.size() << " images."; + + string db_backend = "leveldb"; + if (argc >= (arg_offset+6)) { + db_backend = string(argv[arg_offset+5]); + if (!(db_backend == "leveldb") && !(db_backend == "lmdb")) { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + } + + int resize_height = 0; + int resize_width = 0; + if (argc >= (arg_offset+7)) { + resize_height = atoi(argv[arg_offset+6]); + } + if (argc >= (arg_offset+8)) { + resize_width = atoi(argv[arg_offset+7]); + } + + // Open new db + // lmdb + MDB_env *mdb_env; + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_data; + MDB_txn *mdb_txn; + // leveldb + leveldb::DB* db; + leveldb::Options options; + options.error_if_exists = true; + options.create_if_missing = true; + options.write_buffer_size = 268435456; + leveldb::WriteBatch* batch; + + // Open db + if (db_backend == "leveldb") { // leveldb + LOG(INFO) << "Opening leveldb " << argv[arg_offset+3]; + leveldb::Status status = leveldb::DB::Open( + options, argv[arg_offset+3], &db); + CHECK(status.ok()) << "Failed to open leveldb " << argv[arg_offset+3]; + batch = new leveldb::WriteBatch(); + } else if (db_backend == "lmdb") { // lmdb + LOG(INFO) << "Opening lmdb " << argv[arg_offset+3]; + CHECK_EQ(mkdir(argv[arg_offset+3], 0744), 0) + << "mkdir " << argv[arg_offset+3] << "failed"; + CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed"; + CHECK_EQ(mdb_env_set_mapsize(mdb_env, 1099511627776), MDB_SUCCESS) // 1TB + << "mdb_env_set_mapsize failed"; + CHECK_EQ(mdb_env_open(mdb_env, argv[3], 0, 0664), MDB_SUCCESS) + << "mdb_env_open failed"; + CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS) + << "mdb_open failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + // Storing to db + string root_folder(argv[arg_offset+1]); + Datum datum; + int count = 0; + const int kMaxKeyLength = 256; + char key_cstr[kMaxKeyLength]; + int data_size; + bool data_size_initialized = false; + + for (int line_id = 0; line_id < lines.size(); ++line_id) { + if (!ReadImageToDatum(root_folder + lines[line_id].first, + lines[line_id].second, resize_height, resize_width, is_color, &datum)) { + 
continue; + } + if (!data_size_initialized) { + data_size = datum.channels() * datum.height() * datum.width(); + data_size_initialized = true; + } else { + const string& data = datum.data(); + CHECK_EQ(data.size(), data_size) << "Incorrect data field size " + << data.size(); + } + // sequential + snprintf(key_cstr, kMaxKeyLength, "%08d_%s", line_id, + lines[line_id].first.c_str()); + string value; + datum.SerializeToString(&value); + string keystr(key_cstr); + + // Put in db + if (db_backend == "leveldb") { // leveldb + batch->Put(keystr, value); + } else if (db_backend == "lmdb") { // lmdb + mdb_data.mv_size = value.size(); + mdb_data.mv_data = reinterpret_cast(&value[0]); + mdb_key.mv_size = keystr.size(); + mdb_key.mv_data = reinterpret_cast(&keystr[0]); + CHECK_EQ(mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0), MDB_SUCCESS) + << "mdb_put failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + + if (++count % 1000 == 0) { + // Commit txn + if (db_backend == "leveldb") { // leveldb + db->Write(leveldb::WriteOptions(), batch); + delete batch; + batch = new leveldb::WriteBatch(); + } else if (db_backend == "lmdb") { // lmdb + CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) + << "mdb_txn_commit failed"; + CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) + << "mdb_txn_begin failed"; + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + LOG(ERROR) << "Processed " << count << " files."; + } + } + // write the last batch + if (count % 1000 != 0) { + if (db_backend == "leveldb") { // leveldb + db->Write(leveldb::WriteOptions(), batch); + delete batch; + delete db; + } else if (db_backend == "lmdb") { // lmdb + CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; + mdb_close(mdb_env, mdb_dbi); + mdb_env_close(mdb_env); + } else { + LOG(FATAL) << "Unknown db backend " << db_backend; + } + LOG(ERROR) << "Processed " << count << " files."; + } + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/device_query.cpp b/modules/dnns_easily_fooled/caffe/tools/device_query.cpp new file mode 100644 index 000000000..5040b8ee9 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/device_query.cpp @@ -0,0 +1,23 @@ +// Copyright 2014 BVLC and contributors. + + +#include "caffe/common.hpp" +#include "caffe/net.hpp" + + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + if (argc > 2) { + LOG(ERROR) << "device_query [device_id=0]"; + return 1; + } + if (argc == 2) { + LOG(INFO) << "Querying device_id=" << argv[1]; + Caffe::SetDevice(atoi(argv[1])); + Caffe::DeviceQuery(); + } else { + Caffe::DeviceQuery(); + } + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/dump_network.cpp b/modules/dnns_easily_fooled/caffe/tools/dump_network.cpp new file mode 100644 index 000000000..f29e150b0 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/dump_network.cpp @@ -0,0 +1,80 @@ +// Copyright 2014 BVLC and contributors. +// +// This program takes in a trained network and an input blob, and then dumps +// all the intermediate blobs produced by the net to individual binary +// files stored in protobuffer binary formats. +// Usage: +// dump_network input_net_param trained_net_param \ +// input_blob output_prefix 0/1 +// if input_net_param is 'none', we will directly load the network from +// trained_net_param. If the last argv is 1, we will do a forward-backward pass +// before dumping everyting, and also dump the who network. 
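+//
+// A minimal invocation sketch (all paths here are hypothetical): load the net
+// definition directly from a trained snapshot, feed it a serialized input blob,
+// run a forward-backward pass, and write each intermediate blob with the
+// "dump_" prefix:
+//   dump_network none caffenet_train_iter_310000 input_blob.binaryproto dump_ 1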
+ +#include +#include + +#include "cuda_runtime.h" +#include "fcntl.h" +#include "google/protobuf/text_format.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" +#include "caffe/solver.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + Caffe::set_mode(Caffe::GPU); + Caffe::set_phase(Caffe::TEST); + + shared_ptr > caffe_net; + if (strcmp(argv[1], "none") == 0) { + // We directly load the net param from trained file + caffe_net.reset(new Net(argv[2])); + } else { + caffe_net.reset(new Net(argv[1])); + } + caffe_net->CopyTrainedLayersFrom(argv[2]); + + vector* > input_vec; + shared_ptr > input_blob(new Blob()); + if (strcmp(argv[3], "none") != 0) { + BlobProto input_blob_proto; + ReadProtoFromBinaryFile(argv[3], &input_blob_proto); + input_blob->FromProto(input_blob_proto); + input_vec.push_back(input_blob.get()); + } + + string output_prefix(argv[4]); + // Run the network without training. + LOG(ERROR) << "Performing Forward"; + caffe_net->Forward(input_vec); + if (argc > 5 && strcmp(argv[5], "1") == 0) { + LOG(ERROR) << "Performing Backward"; + Caffe::set_phase(Caffe::TRAIN); + caffe_net->Backward(); + // Dump the network + NetParameter output_net_param; + caffe_net->ToProto(&output_net_param, true); + WriteProtoToBinaryFile(output_net_param, + output_prefix + output_net_param.name()); + } + // Now, let's dump all the layers + + const vector& blob_names = caffe_net->blob_names(); + const vector > >& blobs = caffe_net->blobs(); + for (int blobid = 0; blobid < caffe_net->blobs().size(); ++blobid) { + // Serialize blob + LOG(ERROR) << "Dumping " << blob_names[blobid]; + BlobProto output_blob_proto; + blobs[blobid]->ToProto(&output_blob_proto); + WriteProtoToBinaryFile(output_blob_proto, + output_prefix + blob_names[blobid]); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/extract_seconds.py b/modules/dnns_easily_fooled/caffe/tools/extra/extract_seconds.py new file mode 100755 index 000000000..f791afa32 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/extract_seconds.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +import datetime +import os +import sys + +def extract_datetime_from_line(line, year): + # Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565 + line = line.strip().split() + month = int(line[0][1:3]) + day = int(line[0][3:]) + timestamp = line[1] + pos = timestamp.rfind('.') + ts = [int(x) for x in timestamp[:pos].split(':')] + hour = ts[0] + minute = ts[1] + second = ts[2] + microsecond = int(timestamp[pos + 1:]) + dt = datetime.datetime(year, month, day, hour, minute, second, microsecond) + return dt + +def extract_seconds(input_file, output_file): + with open(input_file, 'r') as f: + lines = f.readlines() + log_created_time = os.path.getctime(input_file) + log_created_year = datetime.datetime.fromtimestamp(log_created_time).year + start_time_found = False + out = open(output_file, 'w') + for line in lines: + line = line.strip() + if not start_time_found and line.find('Solving') != -1: + start_time_found = True + start_datetime = extract_datetime_from_line(line, log_created_year) + if line.find('Iteration') != -1: + dt = extract_datetime_from_line(line, log_created_year) + elapsed_seconds = (dt - start_datetime).total_seconds() + out.write('%f\n' % elapsed_seconds) + out.close() + +if __name__ == '__main__': + if len(sys.argv) 
< 3: + print('Usage: ./extract_seconds input_file output_file') + exit(1) + extract_seconds(sys.argv[1], sys.argv[2]) diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/launch_resize_and_crop_images.sh b/modules/dnns_easily_fooled/caffe/tools/extra/launch_resize_and_crop_images.sh new file mode 100755 index 000000000..84ca858cd --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/launch_resize_and_crop_images.sh @@ -0,0 +1,24 @@ +#!/bin/bash +#### https://github.com/Yangqing/mincepie/wiki/Launch-Your-Mapreducer + +# If you encounter error that the address already in use, kill the process. +# 11235 is the port of server process +# https://github.com/Yangqing/mincepie/blob/master/mincepie/mince.py +# sudo netstat -ap | grep 11235 +# The last column of the output is PID/Program name +# kill -9 PID +# Second solution: +# nmap localhost +# fuser -k 11235/tcp +# Or just wait a few seconds. + +## Launch your Mapreduce locally +# num_clients: number of processes +# image_lib: OpenCV or PIL, case insensitive. The default value is the faster OpenCV. +# input: the file containing one image path relative to input_folder each line +# input_folder: where are the original images +# output_folder: where to save the resized and cropped images +./resize_and_crop_images.py --num_clients=8 --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ + +## Launch your Mapreduce with MPI +# mpirun -n 8 --launch=mpi resize_and_crop_images.py --image_lib=opencv --input=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images.txt --input_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train/ --output_folder=/home/user/Datasets/ImageNet/ILSVRC2010/ILSVRC2010_images_train_resized/ diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/parse_log.sh b/modules/dnns_easily_fooled/caffe/tools/extra/parse_log.sh new file mode 100755 index 000000000..01ea6f493 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/parse_log.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Usage parse_log.sh caffe.log +# It creates two files one caffe.log.test that contains the loss and test accuracy of the test and +# another one caffe.log.loss that contains the loss computed during the training + +# get the dirname of the script +DIR="$( cd "$(dirname "$0")" ; pwd -P )" + +if [ "$#" -lt 1 ] +then +echo "Usage parse_log.sh /path/to/your.log" +exit +fi +LOG=`basename $1` +grep -B 1 'Test ' $1 > aux.txt +grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt +grep 'Test score #0' aux.txt | awk '{print $8}' > aux1.txt +grep 'Test score #1' aux.txt | awk '{print $8}' > aux2.txt + +# Extracting elpased seconds +# For extraction of time since this line constains the start time +grep '] Solving ' $1 > aux3.txt +grep 'Testing net' $1 >> aux3.txt +$DIR/extract_seconds.py aux3.txt aux4.txt + +# Generating +echo '# Iters Seconds TestAccuracy TestLoss'> $LOG.test +paste aux0.txt aux4.txt aux1.txt aux2.txt | column -t >> $LOG.test +rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt + +# For extraction of time since this line constains the start time +grep '] Solving ' $1 > aux.txt +grep ', loss = ' $1 >> aux.txt +grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt +grep ', loss = ' $1 | awk '{print $9}' > aux1.txt +grep ', lr = ' $1 | awk '{print $9}' > 
aux2.txt + +# Extracting elpased seconds +$DIR/extract_seconds.py aux.txt aux3.txt + +# Generating +echo '# Iters Seconds TrainingLoss LearningRate'> $LOG.train +paste aux0.txt aux3.txt aux1.txt aux2.txt | column -t >> $LOG.train +rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/plot_log.gnuplot.example b/modules/dnns_easily_fooled/caffe/tools/extra/plot_log.gnuplot.example new file mode 100644 index 000000000..76715c589 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/plot_log.gnuplot.example @@ -0,0 +1,69 @@ +# These snippets serve only as basic examples. +# Customization is a must. +# You can copy, paste, edit them in whatever way you want. +# Be warned that the fields in the training log may change in the future. +# You had better check the data files before designing your own plots. + +# Please generate the neccessary data files with +# /path/to/caffe/scripts/parse_log.sh before plotting. +# Example usage: +# ./parse_log.sh mnist.log +# Now you have mnist.log.train and mnist.log.test. +# gnuplot mnist.gnuplot + +# The fields present in the data files that are usually proper to plot along +# the y axis are test accuracy, test loss, training loss, and learning rate. +# Those should plot along the x axis are training iterations and seconds. +# Possible combinations: +# 1. Test accuracy (test score 0) vs. training iterations / time; +# 2. Test loss (test score 1) time; +# 3. Training loss vs. training iterations / time; +# 4. Learning rate vs. training iterations / time; +# A rarer one: Training time vs. iterations. + +# What is the difference between plotting against iterations and time? +# If the overhead in one iteration is too high, one algorithm might appear +# to be faster in terms of progress per iteration and slower when measured +# against time. And the reverse case is not entirely impossible. Thus, some +# papers chose to only publish the more favorable type. It is your freedom +# to decide what to plot. + +reset +set terminal png +set output "your_chart_name.png" +set style data lines +set key right + +###### Fields in the data file your_log_name.log.train are +###### Iters Seconds TrainingLoss LearningRate + +# Training loss vs. training iterations +set title "Training loss vs. training iterations" +set xlabel "Training loss" +set ylabel "Training iterations" +plot "mnist.log.train" using 1:3 title "mnist" + +# Training loss vs. training time +# plot "mnist.log.train" using 2:3 title "mnist" + +# Learning rate vs. training iterations; +# plot "mnist.log.train" using 1:4 title "mnist" + +# Learning rate vs. training time; +# plot "mnist.log.train" using 2:4 title "mnist" + + +###### Fields in the data file your_log_name.log.test are +###### Iters Seconds TestAccuracy TestLoss + +# Test loss vs. training iterations +# plot "mnist.log.test" using 1:4 title "mnist" + +# Test accuracy vs. training iterations +# plot "mnist.log.test" using 1:3 title "mnist" + +# Test loss vs. training time +# plot "mnist.log.test" using 2:4 title "mnist" + +# Test accuracy vs. 
training time +# plot "mnist.log.test" using 2:3 title "mnist" diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/plot_training_log.py.example b/modules/dnns_easily_fooled/caffe/tools/extra/plot_training_log.py.example new file mode 100755 index 000000000..b6fda54e0 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/plot_training_log.py.example @@ -0,0 +1,187 @@ +#!/usr/bin/env python +import inspect +import os +import random +import sys +import matplotlib.cm as cmx +import matplotlib.colors as colors +import matplotlib.pyplot as plt +import matplotlib.legend as lgd +import matplotlib.markers as mks + +def get_log_parsing_script(): + dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + return dirname + '/parse_log.sh' + +def get_log_file_suffix(): + return '.log' + +def get_chart_type_description_separator(): + return ' vs. ' + +def is_x_axis_field(field): + x_axis_fields = ['Iters', 'Seconds'] + return field in x_axis_fields + +def create_field_index(): + train_key = 'Train' + test_key = 'Test' + field_index = {train_key:{'Iters':0, 'Seconds':1, train_key + ' loss':2, + train_key + ' learning rate':3}, + test_key:{'Iters':0, 'Seconds':1, test_key + ' accuracy':2, + test_key + ' loss':3}} + fields = set() + for data_file_type in field_index.keys(): + fields = fields.union(set(field_index[data_file_type].keys())) + fields = list(fields) + fields.sort() + return field_index, fields + +def get_supported_chart_types(): + field_index, fields = create_field_index() + num_fields = len(fields) + supported_chart_types = [] + for i in xrange(num_fields): + if not is_x_axis_field(fields[i]): + for j in xrange(num_fields): + if i != j and is_x_axis_field(fields[j]): + supported_chart_types.append('%s%s%s' % ( + fields[i], get_chart_type_description_separator(), + fields[j])) + return supported_chart_types + +def get_chart_type_description(chart_type): + supported_chart_types = get_supported_chart_types() + chart_type_description = supported_chart_types[chart_type] + return chart_type_description + +def get_data_file_type(chart_type): + description = get_chart_type_description(chart_type) + data_file_type = description.split()[0] + return data_file_type + +def get_data_file(chart_type, path_to_log): + return os.path.basename(path_to_log) + '.' 
+ get_data_file_type(chart_type).lower() + +def get_field_descriptions(chart_type): + description = get_chart_type_description(chart_type).split( + get_chart_type_description_separator()) + y_axis_field = description[0] + x_axis_field = description[1] + return x_axis_field, y_axis_field + +def get_field_indecies(x_axis_field, y_axis_field): + data_file_type = get_data_file_type(chart_type) + fields = create_field_index()[0][data_file_type] + return fields[x_axis_field], fields[y_axis_field] + +def load_data(data_file, field_idx0, field_idx1): + data = [[], []] + with open(data_file, 'r') as f: + for line in f: + line = line.strip() + if line[0] != '#': + fields = line.split() + data[0].append(float(fields[field_idx0].strip())) + data[1].append(float(fields[field_idx1].strip())) + return data + +def random_marker(): + markers = mks.MarkerStyle.markers + num = len(markers.values()) + idx = random.randint(0, num - 1) + return markers.values()[idx] + +def get_data_label(path_to_log): + label = path_to_log[path_to_log.rfind('/')+1 : path_to_log.rfind( + get_log_file_suffix())] + return label + +def get_legend_loc(chart_type): + x_axis, y_axis = get_field_descriptions(chart_type) + loc = 'lower right' + if y_axis.find('accuracy') != -1: + pass + if y_axis.find('loss') != -1 or y_axis.find('learning rate') != -1: + loc = 'upper right' + return loc + +def plot_chart(chart_type, path_to_png, path_to_log_list): + for path_to_log in path_to_log_list: + os.system('%s %s' % (get_log_parsing_script(), path_to_log)) + data_file = get_data_file(chart_type, path_to_log) + x_axis_field, y_axis_field = get_field_descriptions(chart_type) + x, y = get_field_indecies(x_axis_field, y_axis_field) + data = load_data(data_file, x, y) + ## TODO: more systematic color cycle for lines + color = [random.random(), random.random(), random.random()] + label = get_data_label(path_to_log) + linewidth = 0.75 + ## If there too many datapoints, do not use marker. +## use_marker = False + use_marker = True + if not use_marker: + plt.plot(data[0], data[1], label = label, color = color, + linewidth = linewidth) + else: + ok = False + ## Some markers throw ValueError: Unrecognized marker style + while not ok: + try: + marker = random_marker() + plt.plot(data[0], data[1], label = label, color = color, + marker = marker, linewidth = linewidth) + ok = True + except: + pass + legend_loc = get_legend_loc(chart_type) + plt.legend(loc = legend_loc, ncol = 1) # ajust ncol to fit the space + plt.title(get_chart_type_description(chart_type)) + plt.xlabel(x_axis_field) + plt.ylabel(y_axis_field) + plt.savefig(path_to_png) + plt.show() + +def print_help(): + print """This script mainly serves as the basis of your customizations. +Customization is a must. +You can copy, paste, edit them in whatever way you want. +Be warned that the fields in the training log may change in the future. +You had better check the data files and change the mapping from field name to + field index in create_field_index before designing your own plots. +Usage: + ./plot_log.sh chart_type[0-%s] /where/to/save.png /path/to/first.log ... +Notes: + 1. Supporting multiple logs. + 2. Log file name must end with the lower-cased "%s". 
+Supported chart types:""" % (len(get_supported_chart_types()) - 1, + get_log_file_suffix()) + supported_chart_types = get_supported_chart_types() + num = len(supported_chart_types) + for i in xrange(num): + print ' %d: %s' % (i, supported_chart_types[i]) + exit + +def is_valid_chart_type(chart_type): + return chart_type >= 0 and chart_type < len(get_supported_chart_types()) + +if __name__ == '__main__': + if len(sys.argv) < 4: + print_help() + else: + chart_type = int(sys.argv[1]) + if not is_valid_chart_type(chart_type): + print_help() + path_to_png = sys.argv[2] + if not path_to_png.endswith('.png'): + print 'Path must ends with png' % path_to_png + exit + path_to_logs = sys.argv[3:] + for path_to_log in path_to_logs: + if not os.path.exists(path_to_log): + print 'Path does not exist: %s' % path_to_log + exit + if not path_to_log.endswith(get_log_file_suffix()): + print_help() + ## plot_chart accpets multiple path_to_logs + plot_chart(chart_type, path_to_png, path_to_logs) diff --git a/modules/dnns_easily_fooled/caffe/tools/extra/resize_and_crop_images.py b/modules/dnns_easily_fooled/caffe/tools/extra/resize_and_crop_images.py new file mode 100755 index 000000000..0ab75dc2a --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extra/resize_and_crop_images.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +from mincepie import mapreducer, launcher +import gflags +import os +import cv2 +import PIL + +# gflags +gflags.DEFINE_string('image_lib', 'opencv', + 'OpenCV or PIL, case insensitive. The default value is the faster OpenCV.') +gflags.DEFINE_string('input_folder', '', + 'The folder that contains all input images, organized in synsets.') +gflags.DEFINE_integer('output_side_length', 256, + 'Expected side length of the output image.') +gflags.DEFINE_string('output_folder', '', + 'The folder that we write output resized and cropped images to') +FLAGS = gflags.FLAGS + +class OpenCVResizeCrop: + def resize_and_crop_image(self, input_file, output_file, output_side_length = 256): + '''Takes an image name, resize it and crop the center square + ''' + img = cv2.imread(input_file) + height, width, depth = img.shape + new_height = output_side_length + new_width = output_side_length + if height > width: + new_height = output_side_length * height / width + else: + new_width = output_side_length * width / height + resized_img = cv2.resize(img, (new_width, new_height)) + height_offset = (new_height - output_side_length) / 2 + width_offset = (new_width - output_side_length) / 2 + cropped_img = resized_img[height_offset:height_offset + output_side_length, + width_offset:width_offset + output_side_length] + cv2.imwrite(output_file, cropped_img) + +class PILResizeCrop: +## http://united-coders.com/christian-harms/image-resizing-tips-every-coder-should-know/ + def resize_and_crop_image(self, input_file, output_file, output_side_length = 256): + '''Downsample the image. 
+ ''' + box = (output_side_length, output_side_length) + #preresize image with factor 2, 4, 8 and fast algorithm + factor = 1 + while img.size[0]/factor > 2*box[0] and img.size[1]*2/factor > 2*box[1]: + factor *=2 + if factor > 1: + img.thumbnail((img.size[0]/factor, img.size[1]/factor), Image.NEAREST) + + #calculate the cropping box and get the cropped part + if fit: + x1 = y1 = 0 + x2, y2 = img.size + wRatio = 1.0 * x2/box[0] + hRatio = 1.0 * y2/box[1] + if hRatio > wRatio: + y1 = int(y2/2-box[1]*wRatio/2) + y2 = int(y2/2+box[1]*wRatio/2) + else: + x1 = int(x2/2-box[0]*hRatio/2) + x2 = int(x2/2+box[0]*hRatio/2) + img = img.crop((x1,y1,x2,y2)) + + #Resize the image with best quality algorithm ANTI-ALIAS + img.thumbnail(box, Image.ANTIALIAS) + + #save it into a file-like object + with open(output_file, 'wb') as out: + img.save(out, 'JPEG', quality=75) + +class ResizeCropImagesMapper(mapreducer.BasicMapper): + '''The ImageNet Compute mapper. + The input value would be the file listing images' paths relative to input_folder. + ''' + def map(self, key, value): + if type(value) is not str: + value = str(value) + files = [value] + image_lib = FLAGS.image_lib.lower() + if image_lib == 'pil': + resize_crop = PILResizeCrop() + else: + resize_crop = OpenCVResizeCrop() + for i, line in enumerate(files): + try: + line = line.replace(FLAGS.input_folder, '').strip() + line = line.split() + image_file_name = line[0] + input_file = os.path.join(FLAGS.input_folder, image_file_name) + output_file = os.path.join(FLAGS.output_folder, image_file_name) + output_dir = output_file[:output_file.rfind('/')] + if not os.path.exists(output_dir): + os.makedirs(output_dir) + feat = resize_crop.resize_and_crop_image(input_file, output_file, + FLAGS.output_side_length) + except Exception, e: + # we ignore the exception (maybe the image is corrupted?) + print line, Exception, e + yield value, FLAGS.output_folder + +mapreducer.REGISTER_DEFAULT_MAPPER(ResizeCropImagesMapper) + +mapreducer.REGISTER_DEFAULT_READER(mapreducer.FileReader) +mapreducer.REGISTER_DEFAULT_WRITER(mapreducer.FileWriter) + +if __name__ == '__main__': + launcher.launch() diff --git a/modules/dnns_easily_fooled/caffe/tools/extract_features.cpp b/modules/dnns_easily_fooled/caffe/tools/extract_features.cpp new file mode 100644 index 000000000..cdad6676d --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/extract_features.cpp @@ -0,0 +1,166 @@ +// Copyright 2014 BVLC and contributors. 
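+//
+// Usage sketch (argument order follows the usage message in main() below; the
+// model, prototxt, blob name and db name here are hypothetical): extract 10
+// mini-batches of fc7 activations on the GPU and store them in a leveldb:
+//   extract_features caffenet.caffemodel feature_extraction.prototxt fc7 fc7_features_leveldb 10 GPU 0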
+ +#include // for snprintf +#include +#include +#include +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/io.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +template +int feature_extraction_pipeline(int argc, char** argv); + +int main(int argc, char** argv) { + return feature_extraction_pipeline(argc, argv); +// return feature_extraction_pipeline(argc, argv); +} + +template +int feature_extraction_pipeline(int argc, char** argv) { + const int num_required_args = 6; + if (argc < num_required_args) { + LOG(ERROR)<< + "This program takes in a trained network and an input data layer, and then" + " extract features of the input data produced by the net.\n" + "Usage: demo_extract_features pretrained_net_param" + " feature_extraction_proto_file extract_feature_blob_name" + " save_feature_leveldb_name num_mini_batches [CPU/GPU] [DEVICE_ID=0]"; + return 1; + } + int arg_pos = num_required_args; + + arg_pos = num_required_args; + if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { + LOG(ERROR)<< "Using GPU"; + uint device_id = 0; + if (argc > arg_pos + 1) { + device_id = atoi(argv[arg_pos + 1]); + CHECK_GE(device_id, 0); + } + LOG(ERROR) << "Using Device_id=" << device_id; + Caffe::SetDevice(device_id); + Caffe::set_mode(Caffe::GPU); + } else { + LOG(ERROR) << "Using CPU"; + Caffe::set_mode(Caffe::CPU); + } + Caffe::set_phase(Caffe::TEST); + + arg_pos = 0; // the name of the executable + string pretrained_binary_proto(argv[++arg_pos]); + + // Expected prototxt contains at least one data layer such as + // the layer data_layer_name and one feature blob such as the + // fc7 top blob to extract features. 
+ /* + layers { + name: "data_layer_name" + type: DATA + data_param { + source: "/path/to/your/images/to/extract/feature/images_leveldb" + mean_file: "/path/to/your/image_mean.binaryproto" + batch_size: 128 + crop_size: 227 + mirror: false + } + top: "data_blob_name" + top: "label_blob_name" + } + layers { + name: "drop7" + type: DROPOUT + dropout_param { + dropout_ratio: 0.5 + } + bottom: "fc7" + top: "fc7" + } + */ + string feature_extraction_proto(argv[++arg_pos]); + shared_ptr > feature_extraction_net( + new Net(feature_extraction_proto)); + feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); + + string extract_feature_blob_name(argv[++arg_pos]); + CHECK(feature_extraction_net->has_blob(extract_feature_blob_name)) + << "Unknown feature blob name " << extract_feature_blob_name + << " in the network " << feature_extraction_proto; + + string save_feature_leveldb_name(argv[++arg_pos]); + leveldb::DB* db; + leveldb::Options options; + options.error_if_exists = true; + options.create_if_missing = true; + options.write_buffer_size = 268435456; + LOG(INFO)<< "Opening leveldb " << save_feature_leveldb_name; + leveldb::Status status = leveldb::DB::Open(options, + save_feature_leveldb_name.c_str(), + &db); + CHECK(status.ok()) << "Failed to open leveldb " << save_feature_leveldb_name; + + int num_mini_batches = atoi(argv[++arg_pos]); + + LOG(ERROR)<< "Extacting Features"; + + Datum datum; + leveldb::WriteBatch* batch = new leveldb::WriteBatch(); + const int kMaxKeyStrLength = 100; + char key_str[kMaxKeyStrLength]; + int num_bytes_of_binary_code = sizeof(Dtype); + vector*> input_vec; + int image_index = 0; + for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) { + feature_extraction_net->Forward(input_vec); + const shared_ptr > feature_blob = feature_extraction_net + ->blob_by_name(extract_feature_blob_name); + int num_features = feature_blob->num(); + int dim_features = feature_blob->count() / num_features; + Dtype* feature_blob_data; + for (int n = 0; n < num_features; ++n) { + datum.set_height(dim_features); + datum.set_width(1); + datum.set_channels(1); + datum.clear_data(); + datum.clear_float_data(); + feature_blob_data = feature_blob->mutable_cpu_data() + + feature_blob->offset(n); + for (int d = 0; d < dim_features; ++d) { + datum.add_float_data(feature_blob_data[d]); + } + string value; + datum.SerializeToString(&value); + snprintf(key_str, kMaxKeyStrLength, "%d", image_index); + batch->Put(string(key_str), value); + ++image_index; + if (image_index % 1000 == 0) { + db->Write(leveldb::WriteOptions(), batch); + LOG(ERROR)<< "Extracted features of " << image_index << + " query images."; + delete batch; + batch = new leveldb::WriteBatch(); + } + } // for (int n = 0; n < num_features; ++n) + } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) + // write the last batch + if (image_index % 1000 != 0) { + db->Write(leveldb::WriteOptions(), batch); + LOG(ERROR)<< "Extracted features of " << image_index << + " query images."; + } + + delete batch; + delete db; + LOG(ERROR)<< "Successfully extracted the features!"; + return 0; +} + diff --git a/modules/dnns_easily_fooled/caffe/tools/finetune_net.cpp b/modules/dnns_easily_fooled/caffe/tools/finetune_net.cpp new file mode 100644 index 000000000..c1cd788a1 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/finetune_net.cpp @@ -0,0 +1,33 @@ +// Copyright 2014 BVLC and contributors. +// +// This is a simple script that allows one to quickly finetune a network. 
+// Usage: +// finetune_net solver_proto_file pretrained_net + +#include + +#include + +#include "caffe/caffe.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc != 3) { + LOG(ERROR) << "Usage: finetune_net solver_proto_file pretrained_net"; + return 1; + } + + SolverParameter solver_param; + ReadProtoFromTextFileOrDie(argv[1], &solver_param); + + LOG(INFO) << "Starting Optimization"; + SGDSolver solver(solver_param); + LOG(INFO) << "Loading from " << argv[2]; + solver.net()->CopyTrainedLayersFrom(string(argv[2])); + solver.Solve(); + LOG(INFO) << "Optimization Done."; + + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/net_speed_benchmark.cpp b/modules/dnns_easily_fooled/caffe/tools/net_speed_benchmark.cpp new file mode 100644 index 000000000..36a00779f --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/net_speed_benchmark.cpp @@ -0,0 +1,101 @@ +// Copyright 2014 BVLC and contributors. + +#include +#include +#include + +#include +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/filler.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" +#include "caffe/solver.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + int total_iter = 50; + if (argc < 2 || argc > 5) { + LOG(ERROR) << "net_speed_benchmark net_proto [iterations=50]" + " [CPU/GPU] [Device_id=0]"; + return 1; + } + + if (argc >=3) { + total_iter = atoi(argv[2]); + } + + LOG(ERROR) << "Testing for " << total_iter << "Iterations."; + + if (argc >= 4 && strcmp(argv[3], "GPU") == 0) { + LOG(ERROR) << "Using GPU"; + uint device_id = 0; + if (argc >= 5 && strcmp(argv[3], "GPU") == 0) { + device_id = atoi(argv[4]); + } + LOG(ERROR) << "Using Device_id=" << device_id; + Caffe::SetDevice(device_id); + Caffe::set_mode(Caffe::GPU); + } else { + LOG(ERROR) << "Using CPU"; + Caffe::set_mode(Caffe::CPU); + } + + Caffe::set_phase(Caffe::TRAIN); + Net caffe_net(argv[1]); + + // Run the network without training. + LOG(ERROR) << "Performing Forward"; + // Note that for the speed benchmark, we will assume that the network does + // not take any input blobs. 
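+  // In the old prototxt format used elsewhere in this module, that typically
+  // means the net begins with a self-contained data layer (hypothetical sketch):
+  //   layers {
+  //     name: "data"
+  //     type: DATA
+  //     data_param { source: "train_leveldb" batch_size: 64 }
+  //     top: "data"
+  //     top: "label"
+  //   }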
+ float initial_loss; + caffe_net.Forward(vector*>(), &initial_loss); + LOG(ERROR) << "Initial loss: " << initial_loss; + LOG(ERROR) << "Performing Backward"; + caffe_net.Backward(); + + const vector > >& layers = caffe_net.layers(); + vector*> >& bottom_vecs = caffe_net.bottom_vecs(); + vector*> >& top_vecs = caffe_net.top_vecs(); + LOG(ERROR) << "*** Benchmark begins ***"; + Timer total_timer; + total_timer.Start(); + Timer forward_timer; + forward_timer.Start(); + Timer timer; + for (int i = 0; i < layers.size(); ++i) { + const string& layername = layers[i]->layer_param().name(); + timer.Start(); + for (int j = 0; j < total_iter; ++j) { + layers[i]->Forward(bottom_vecs[i], &top_vecs[i]); + } + LOG(ERROR) << layername << "\tforward: " << timer.MilliSeconds() << + " milli seconds."; + } + LOG(ERROR) << "Forward pass: " << forward_timer.MilliSeconds() << + " milli seconds."; + Timer backward_timer; + backward_timer.Start(); + for (int i = layers.size() - 1; i >= 0; --i) { + const string& layername = layers[i]->layer_param().name(); + timer.Start(); + for (int j = 0; j < total_iter; ++j) { + layers[i]->Backward(top_vecs[i], true, &bottom_vecs[i]); + } + LOG(ERROR) << layername << "\tbackward: " + << timer.MilliSeconds() << " milli seconds."; + } + LOG(ERROR) << "Backward pass: " << backward_timer.MilliSeconds() << + " milli seconds."; + LOG(ERROR) << "Total Time: " << total_timer.MilliSeconds() << + " milli seconds."; + LOG(ERROR) << "*** Benchmark ends ***"; + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/test_net.cpp b/modules/dnns_easily_fooled/caffe/tools/test_net.cpp new file mode 100644 index 000000000..c5819ec71 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/test_net.cpp @@ -0,0 +1,57 @@ +// Copyright 2014 BVLC and contributors. +// +// This is a simple script that allows one to quickly test a network whose +// structure is specified by text format protocol buffers, and whose parameter +// are loaded from a pre-trained network. 
+// Usage: +// test_net net_proto pretrained_net_proto iterations [CPU/GPU] + +#include + +#include +#include +#include + +#include "caffe/caffe.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + if (argc < 4 || argc > 6) { + LOG(ERROR) << "test_net net_proto pretrained_net_proto iterations " + << "[CPU/GPU] [Device ID]"; + return 1; + } + + Caffe::set_phase(Caffe::TEST); + + if (argc >= 5 && strcmp(argv[4], "GPU") == 0) { + Caffe::set_mode(Caffe::GPU); + int device_id = 0; + if (argc == 6) { + device_id = atoi(argv[5]); + } + Caffe::SetDevice(device_id); + LOG(ERROR) << "Using GPU #" << device_id; + } else { + LOG(ERROR) << "Using CPU"; + Caffe::set_mode(Caffe::CPU); + } + + Net caffe_test_net(argv[1]); + caffe_test_net.CopyTrainedLayersFrom(argv[2]); + + int total_iter = atoi(argv[3]); + LOG(ERROR) << "Running " << total_iter << " iterations."; + + double test_accuracy = 0; + for (int i = 0; i < total_iter; ++i) { + const vector*>& result = caffe_test_net.ForwardPrefilled(); + test_accuracy += result[0]->cpu_data()[0]; + LOG(ERROR) << "Batch " << i << ", accuracy: " << result[0]->cpu_data()[0]; + } + test_accuracy /= total_iter; + LOG(ERROR) << "Test accuracy: " << test_accuracy; + + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/train_net.cpp b/modules/dnns_easily_fooled/caffe/tools/train_net.cpp new file mode 100644 index 000000000..7c6f23e62 --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/train_net.cpp @@ -0,0 +1,37 @@ +// Copyright 2014 BVLC and contributors. +// +// This is a simple script that allows one to quickly train a network whose +// parameters are specified by text format protocol buffers. +// Usage: +// train_net net_proto_file solver_proto_file [resume_point_file] + +#include + +#include + +#include "caffe/caffe.hpp" + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc < 2 || argc > 3) { + LOG(ERROR) << "Usage: train_net solver_proto_file [resume_point_file]"; + return 1; + } + + SolverParameter solver_param; + ReadProtoFromTextFileOrDie(argv[1], &solver_param); + + LOG(INFO) << "Starting Optimization"; + SGDSolver solver(solver_param); + if (argc == 3) { + LOG(INFO) << "Resuming from " << argv[2]; + solver.Solve(argv[2]); + } else { + solver.Solve(); + } + LOG(INFO) << "Optimization Done."; + + return 0; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_binary.cpp b/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_binary.cpp new file mode 100644 index 000000000..928fc52dc --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_binary.cpp @@ -0,0 +1,46 @@ +// Copyright 2014 BVLC and contributors. +// +// This is a script to upgrade "V0" network prototxts to the new format. 
+// Usage: +// upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out + +#include +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) + +#include "caffe/caffe.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +using std::ofstream; + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc != 3) { + LOG(ERROR) << "Usage: " + << "upgrade_net_proto_binary v0_net_proto_file_in net_proto_file_out"; + return 1; + } + + NetParameter net_param; + if (!ReadProtoFromBinaryFile(argv[1], &net_param)) { + LOG(ERROR) << "Failed to parse input binary file as NetParameter: " + << argv[1]; + return 2; + } + bool need_upgrade = NetNeedsUpgrade(net_param); + bool success = true; + if (need_upgrade) { + NetParameter v0_net_param(net_param); + success = UpgradeV0Net(v0_net_param, &net_param); + } else { + LOG(ERROR) << "File already in V1 proto format: " << argv[1]; + } + + WriteProtoToBinaryFile(net_param, argv[2]); + + LOG(ERROR) << "Wrote upgraded NetParameter binary proto to " << argv[2]; + return !success; +} diff --git a/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_text.cpp b/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_text.cpp new file mode 100644 index 000000000..8a77f752c --- /dev/null +++ b/modules/dnns_easily_fooled/caffe/tools/upgrade_net_proto_text.cpp @@ -0,0 +1,52 @@ +// Copyright 2014 BVLC and contributors. +// +// This is a script to upgrade "V0" network prototxts to the new format. +// Usage: +// upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out + +#include +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) + +#include "caffe/caffe.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +using std::ofstream; + +using namespace caffe; // NOLINT(build/namespaces) + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + if (argc != 3) { + LOG(ERROR) << "Usage: " + << "upgrade_net_proto_text v0_net_proto_file_in net_proto_file_out"; + return 1; + } + + NetParameter net_param; + if (!ReadProtoFromTextFile(argv[1], &net_param)) { + LOG(ERROR) << "Failed to parse input text file as NetParameter: " + << argv[1]; + return 2; + } + bool need_upgrade = NetNeedsUpgrade(net_param); + bool success = true; + if (need_upgrade) { + NetParameter v0_net_param(net_param); + success = UpgradeV0Net(v0_net_param, &net_param); + } else { + LOG(ERROR) << "File already in V1 proto format: " << argv[1]; + } + + // Convert to a NetParameterPrettyPrint to print fields in desired + // order. + NetParameterPrettyPrint net_param_pretty; + NetParameterToPrettyPrint(net_param, &net_param_pretty); + + // Save new format prototxt. 
+ WriteProtoToTextFile(net_param_pretty, argv[2]); + + LOG(ERROR) << "Wrote upgraded NetParameter text proto to " << argv[2]; + return !success; +} diff --git a/modules/dnns_easily_fooled/model/hen_256.png b/modules/dnns_easily_fooled/model/hen_256.png new file mode 100644 index 000000000..f3218b4ed Binary files /dev/null and b/modules/dnns_easily_fooled/model/hen_256.png differ diff --git a/modules/dnns_easily_fooled/model/image_list.txt b/modules/dnns_easily_fooled/model/image_list.txt new file mode 100644 index 000000000..8e462c573 --- /dev/null +++ b/modules/dnns_easily_fooled/model/image_list.txt @@ -0,0 +1 @@ +/home/anh/workspace/sferes/exp/images/imagenet/hen_256.png 1 \ No newline at end of file diff --git a/modules/dnns_easily_fooled/model/imagenet_deploy_image_memory_data.prototxt b/modules/dnns_easily_fooled/model/imagenet_deploy_image_memory_data.prototxt new file mode 100644 index 000000000..5e4b30788 --- /dev/null +++ b/modules/dnns_easily_fooled/model/imagenet_deploy_image_memory_data.prototxt @@ -0,0 +1,223 @@ +name: "CaffeNet" +layers { + name: "data" + type: IMAGE_DATA + top: "data" + top: "label" + image_data_param { + source: "/home/anh/workspace/sferes/exp/images/imagenet/image_list.txt" + mean_file: "/home/anh/src/caffe/data/ilsvrc12/imagenet_mean.binaryproto" + batch_size: 10 + crop_size: 227 + mirror: false + new_height: 256 + new_width: 256 + images_in_color: true + } +} +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + } +} +layers { + name: "relu1" + type: RELU + bottom: "conv1" + top: "conv1" +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm1" + type: LRN + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + } +} +layers { + name: "relu2" + type: RELU + bottom: "conv2" + top: "conv2" +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm2" + type: LRN + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv3" + type: CONVOLUTION + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layers { + name: "relu3" + type: RELU + bottom: "conv3" + top: "conv3" +} +layers { + name: "conv4" + type: CONVOLUTION + bottom: "conv3" + top: "conv4" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu4" + type: RELU + bottom: "conv4" + top: "conv4" +} +layers { + name: "conv5" + type: CONVOLUTION + bottom: "conv4" + top: "conv5" + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu5" + type: RELU + bottom: "conv5" + top: "conv5" +} +layers { + name: "pool5" + type: POOLING + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "fc6" + type: INNER_PRODUCT + bottom: "pool5" + top: "fc6" + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu6" + type: RELU + bottom: "fc6" + top: "fc6" +} +layers { + name: "drop6" + type: DROPOUT + bottom: "fc6" + top: "fc6" 
+ dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc7" + type: INNER_PRODUCT + bottom: "fc6" + top: "fc7" + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu7" + type: RELU + bottom: "fc7" + top: "fc7" +} +layers { + name: "drop7" + type: DROPOUT + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc8" + type: INNER_PRODUCT + bottom: "fc7" + top: "fc8" + inner_product_param { + num_output: 1000 + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "fc8" + top: "prob" +} diff --git a/modules/dnns_easily_fooled/model/lenet/lenet_image_memory_data.prototxt b/modules/dnns_easily_fooled/model/lenet/lenet_image_memory_data.prototxt new file mode 100644 index 000000000..2ecb02158 --- /dev/null +++ b/modules/dnns_easily_fooled/model/lenet/lenet_image_memory_data.prototxt @@ -0,0 +1,123 @@ +name: "LeNet" +layers { + name: "data" + type: IMAGE_DATA + top: "data" + top: "label" + image_data_param { + source: "/project/EvolvingAI/anguyen8/model/mnist_image_list.txt" + mean_file: "/project/EvolvingAI/anguyen8/model/mnist_mean.binaryproto" + batch_size: 1 + mirror: false + new_height: 28 + new_width: 28 + scale: 0.00390625 + images_in_color: false + } +} +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + blobs_lr: 1 + blobs_lr: 2 + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "pool1" + top: "conv2" + blobs_lr: 1 + blobs_lr: 2 + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layers { + name: "ip1" + type: INNER_PRODUCT + bottom: "pool2" + top: "ip1" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layers { + name: "relu1" + type: RELU + bottom: "ip1" + top: "ip1" +} +layers { + name: "ip2" + type: INNER_PRODUCT + bottom: "ip1" + top: "ip2" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "ip2" + top: "prob" +} diff --git a/modules/dnns_easily_fooled/model/lenet/lenet_iter_10000 b/modules/dnns_easily_fooled/model/lenet/lenet_iter_10000 new file mode 100644 index 000000000..3ce648004 Binary files /dev/null and b/modules/dnns_easily_fooled/model/lenet/lenet_iter_10000 differ diff --git a/modules/dnns_easily_fooled/model/lenet/mnist_image_list.txt b/modules/dnns_easily_fooled/model/lenet/mnist_image_list.txt new file mode 100644 index 000000000..c19e3d42b --- /dev/null +++ b/modules/dnns_easily_fooled/model/lenet/mnist_image_list.txt @@ -0,0 +1 @@ +/project/EvolvingAI/anguyen8/model/mnist_sample_image.png 0 diff --git a/modules/dnns_easily_fooled/model/lenet/mnist_mean.binaryproto b/modules/dnns_easily_fooled/model/lenet/mnist_mean.binaryproto new file mode 100644 index 000000000..026e4db78 Binary files /dev/null and b/modules/dnns_easily_fooled/model/lenet/mnist_mean.binaryproto differ diff --git 
a/modules/dnns_easily_fooled/model/lenet/mnist_sample_image.png b/modules/dnns_easily_fooled/model/lenet/mnist_sample_image.png new file mode 100644 index 000000000..5c54ed40f Binary files /dev/null and b/modules/dnns_easily_fooled/model/lenet/mnist_sample_image.png differ diff --git a/modules/dnns_easily_fooled/sferes/.cproject b/modules/dnns_easily_fooled/sferes/.cproject new file mode 100644 index 000000000..320a1b69d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/.cproject @@ -0,0 +1,210 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /home/anh/workspace/sferes/waf + + configure --boost-include=/home/anh/src/sferes/include --boost-lib=/home/anh/src/sferes/lib --eigen3=/home/anh/src/sferes/include --mpi=/home/anh/openmpi + true + true + true + + + + diff --git a/modules/dnns_easily_fooled/sferes/.gitignore b/modules/dnns_easily_fooled/sferes/.gitignore new file mode 100644 index 000000000..7df156e8b --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/.gitignore @@ -0,0 +1,6 @@ +*.pyc +build/ +.waf-1.5.14*/ +.Totti-*/ +Totti-*/ +.lock-wscript diff --git a/modules/dnns_easily_fooled/sferes/.project b/modules/dnns_easily_fooled/sferes/.project new file mode 100644 index 000000000..163f9132d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/.project @@ -0,0 +1,33 @@ + + + sferes + + + + + + org.eclipse.cdt.managedbuilder.core.genmakebuilder + + + + + org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder + full,incremental, + + + + + + org.eclipse.cdt.core.cnature + org.eclipse.cdt.core.ccnature + org.eclipse.cdt.managedbuilder.core.managedBuildNature + org.eclipse.cdt.managedbuilder.core.ScannerConfigNature + + + + src + 2 + /home/anh/src/caffe/src + + + diff --git a/modules/dnns_easily_fooled/sferes/COPYING b/modules/dnns_easily_fooled/sferes/COPYING new file mode 100644 index 000000000..fcc8df26b --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/COPYING @@ -0,0 +1,506 @@ + +CeCILL FREE SOFTWARE LICENSE AGREEMENT + + + Notice + +This Agreement is a Free Software license agreement that is the result +of discussions between its authors in order to ensure compliance with +the two main principles guiding its drafting: + + * firstly, compliance with the principles governing the distribution + of Free Software: access to source code, broad rights granted to + users, + * secondly, the election of a governing law, French law, with which + it is conformant, both as regards the law of torts and + intellectual property law, and the protection that it offers to + both authors and holders of the economic rights over software. + +The authors of the CeCILL (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre]) +license are: + +Commissariat l'Energie Atomique - CEA, a public scientific, technical +and industrial research establishment, having its principal place of +business at 25 rue Leblanc, immeuble Le Ponant D, 75015 Paris, France. + +Centre National de la Recherche Scientifique - CNRS, a public scientific +and technological establishment, having its principal place of business +at 3 rue Michel-Ange, 75794 Paris cedex 16, France. 
+ +Institut National de Recherche en Informatique et en Automatique - +INRIA, a public scientific and technological establishment, having its +principal place of business at Domaine de Voluceau, Rocquencourt, BP +105, 78153 Le Chesnay cedex, France. + + + Preamble + +The purpose of this Free Software license agreement is to grant users +the right to modify and redistribute the software governed by this +license within the framework of an open source distribution model. + +The exercising of these rights is conditional upon certain obligations +for users so as to preserve this status for all subsequent redistributions. + +In consideration of access to the source code and the rights to copy, +modify and redistribute granted by the license, users are provided only +with a limited warranty and the software's author, the holder of the +economic rights, and the successive licensors only have limited liability. + +In this respect, the risks associated with loading, using, modifying +and/or developing or reproducing the software by the user are brought to +the user's attention, given its Free Software status, which may make it +complicated to use, with the result that its use is reserved for +developers and experienced professionals having in-depth computer +knowledge. Users are therefore encouraged to load and test the +suitability of the software as regards their requirements in conditions +enabling the security of their systems and/or data to be ensured and, +more generally, to use and operate it in the same conditions of +security. This Agreement may be freely reproduced and published, +provided it is not altered, and that no provisions are either added or +removed herefrom. + +This Agreement may apply to any or all software for which the holder of +the economic rights decides to submit the use thereof to its provisions. + + + Article 1 - DEFINITIONS + +For the purpose of this Agreement, when the following expressions +commence with a capital letter, they shall have the following meaning: + +Agreement: means this license agreement, and its possible subsequent +versions and annexes. + +Software: means the software in its Object Code and/or Source Code form +and, where applicable, its documentation, "as is" when the Licensee +accepts the Agreement. + +Initial Software: means the Software in its Source Code and possibly its +Object Code form and, where applicable, its documentation, "as is" when +it is first distributed under the terms and conditions of the Agreement. + +Modified Software: means the Software modified by at least one +Contribution. + +Source Code: means all the Software's instructions and program lines to +which access is required so as to modify the Software. + +Object Code: means the binary files originating from the compilation of +the Source Code. + +Holder: means the holder(s) of the economic rights over the Initial +Software. + +Licensee: means the Software user(s) having accepted the Agreement. + +Contributor: means a Licensee having made at least one Contribution. + +Licensor: means the Holder, or any other individual or legal entity, who +distributes the Software under the Agreement. + +Contribution: means any or all modifications, corrections, translations, +adaptations and/or new functions integrated into the Software by any or +all Contributors, as well as any or all Internal Modules. + +Module: means a set of sources files including their documentation that +enables supplementary functions or services in addition to those offered +by the Software. 
+ +External Module: means any or all Modules, not derived from the +Software, so that this Module and the Software run in separate address +spaces, with one calling the other when they are run. + +Internal Module: means any or all Module, connected to the Software so +that they both execute in the same address space. + +GNU GPL: means the GNU General Public License version 2 or any +subsequent version, as published by the Free Software Foundation Inc. + +Parties: mean both the Licensee and the Licensor. + +These expressions may be used both in singular and plural form. + + + Article 2 - PURPOSE + +The purpose of the Agreement is the grant by the Licensor to the +Licensee of a non-exclusive, transferable and worldwide license for the +Software as set forth in Article 5 hereinafter for the whole term of the +protection granted by the rights over said Software. + + + Article 3 - ACCEPTANCE + +3.1 The Licensee shall be deemed as having accepted the terms and +conditions of this Agreement upon the occurrence of the first of the +following events: + + * (i) loading the Software by any or all means, notably, by + downloading from a remote server, or by loading from a physical + medium; + * (ii) the first time the Licensee exercises any of the rights + granted hereunder. + +3.2 One copy of the Agreement, containing a notice relating to the +characteristics of the Software, to the limited warranty, and to the +fact that its use is restricted to experienced users has been provided +to the Licensee prior to its acceptance as set forth in Article 3.1 +hereinabove, and the Licensee hereby acknowledges that it has read and +understood it. + + + Article 4 - EFFECTIVE DATE AND TERM + + + 4.1 EFFECTIVE DATE + +The Agreement shall become effective on the date when it is accepted by +the Licensee as set forth in Article 3.1. + + + 4.2 TERM + +The Agreement shall remain in force for the entire legal term of +protection of the economic rights over the Software. + + + Article 5 - SCOPE OF RIGHTS GRANTED + +The Licensor hereby grants to the Licensee, who accepts, the following +rights over the Software for any or all use, and for the term of the +Agreement, on the basis of the terms and conditions set forth hereinafter. + +Besides, if the Licensor owns or comes to own one or more patents +protecting all or part of the functions of the Software or of its +components, the Licensor undertakes not to enforce the rights granted by +these patents against successive Licensees using, exploiting or +modifying the Software. If these patents are transferred, the Licensor +undertakes to have the transferees subscribe to the obligations set +forth in this paragraph. + + + 5.1 RIGHT OF USE + +The Licensee is authorized to use the Software, without any limitation +as to its fields of application, with it being hereinafter specified +that this comprises: + + 1. permanent or temporary reproduction of all or part of the Software + by any or all means and in any or all form. + + 2. loading, displaying, running, or storing the Software on any or + all medium. + + 3. entitlement to observe, study or test its operation so as to + determine the ideas and principles behind any or all constituent + elements of said Software. This shall apply when the Licensee + carries out any or all loading, displaying, running, transmission + or storage operation as regards the Software, that it is entitled + to carry out hereunder. 
+ + + 5.2 ENTITLEMENT TO MAKE CONTRIBUTIONS + +The right to make Contributions includes the right to translate, adapt, +arrange, or make any or all modifications to the Software, and the right +to reproduce the resulting software. + +The Licensee is authorized to make any or all Contributions to the +Software provided that it includes an explicit notice that it is the +author of said Contribution and indicates the date of the creation thereof. + + + 5.3 RIGHT OF DISTRIBUTION + +In particular, the right of distribution includes the right to publish, +transmit and communicate the Software to the general public on any or +all medium, and by any or all means, and the right to market, either in +consideration of a fee, or free of charge, one or more copies of the +Software by any means. + +The Licensee is further authorized to distribute copies of the modified +or unmodified Software to third parties according to the terms and +conditions set forth hereinafter. + + + 5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION + +The Licensee is authorized to distribute true copies of the Software in +Source Code or Object Code form, provided that said distribution +complies with all the provisions of the Agreement and is accompanied by: + + 1. a copy of the Agreement, + + 2. a notice relating to the limitation of both the Licensor's + warranty and liability as set forth in Articles 8 and 9, + +and that, in the event that only the Object Code of the Software is +redistributed, the Licensee allows future Licensees unhindered access to +the full Source Code of the Software by indicating how to access it, it +being understood that the additional cost of acquiring the Source Code +shall not exceed the cost of transferring the data. + + + 5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE + +When the Licensee makes a Contribution to the Software, the terms and +conditions for the distribution of the resulting Modified Software +become subject to all the provisions of this Agreement. + +The Licensee is authorized to distribute the Modified Software, in +source code or object code form, provided that said distribution +complies with all the provisions of the Agreement and is accompanied by: + + 1. a copy of the Agreement, + + 2. a notice relating to the limitation of both the Licensor's + warranty and liability as set forth in Articles 8 and 9, + +and that, in the event that only the object code of the Modified +Software is redistributed, the Licensee allows future Licensees +unhindered access to the full source code of the Modified Software by +indicating how to access it, it being understood that the additional +cost of acquiring the source code shall not exceed the cost of +transferring the data. + + + 5.3.3 DISTRIBUTION OF EXTERNAL MODULES + +When the Licensee has developed an External Module, the terms and +conditions of this Agreement do not apply to said External Module, that +may be distributed under a separate license agreement. + + + 5.3.4 COMPATIBILITY WITH THE GNU GPL + +The Licensee can include a code that is subject to the provisions of one +of the versions of the GNU GPL in the Modified or unmodified Software, +and distribute that entire code under the terms of the same version of +the GNU GPL. + +The Licensee can include the Modified or unmodified Software in a code +that is subject to the provisions of one of the versions of the GNU GPL, +and distribute that entire code under the terms of the same version of +the GNU GPL. 
+ + + Article 6 - INTELLECTUAL PROPERTY + + + 6.1 OVER THE INITIAL SOFTWARE + +The Holder owns the economic rights over the Initial Software. Any or +all use of the Initial Software is subject to compliance with the terms +and conditions under which the Holder has elected to distribute its work +and no one shall be entitled to modify the terms and conditions for the +distribution of said Initial Software. + +The Holder undertakes that the Initial Software will remain ruled at +least by this Agreement, for the duration set forth in Article 4.2. + + + 6.2 OVER THE CONTRIBUTIONS + +The Licensee who develops a Contribution is the owner of the +intellectual property rights over this Contribution as defined by +applicable law. + + + 6.3 OVER THE EXTERNAL MODULES + +The Licensee who develops an External Module is the owner of the +intellectual property rights over this External Module as defined by +applicable law and is free to choose the type of agreement that shall +govern its distribution. + + + 6.4 JOINT PROVISIONS + +The Licensee expressly undertakes: + + 1. not to remove, or modify, in any manner, the intellectual property + notices attached to the Software; + + 2. to reproduce said notices, in an identical manner, in the copies + of the Software modified or not. + +The Licensee undertakes not to directly or indirectly infringe the +intellectual property rights of the Holder and/or Contributors on the +Software and to take, where applicable, vis--vis its staff, any and all +measures required to ensure respect of said intellectual property rights +of the Holder and/or Contributors. + + + Article 7 - RELATED SERVICES + +7.1 Under no circumstances shall the Agreement oblige the Licensor to +provide technical assistance or maintenance services for the Software. + +However, the Licensor is entitled to offer this type of services. The +terms and conditions of such technical assistance, and/or such +maintenance, shall be set forth in a separate instrument. Only the +Licensor offering said maintenance and/or technical assistance services +shall incur liability therefor. + +7.2 Similarly, any Licensor is entitled to offer to its licensees, under +its sole responsibility, a warranty, that shall only be binding upon +itself, for the redistribution of the Software and/or the Modified +Software, under terms and conditions that it is free to decide. Said +warranty, and the financial terms and conditions of its application, +shall be subject of a separate instrument executed between the Licensor +and the Licensee. + + + Article 8 - LIABILITY + +8.1 Subject to the provisions of Article 8.2, the Licensee shall be +entitled to claim compensation for any direct loss it may have suffered +from the Software as a result of a fault on the part of the relevant +Licensor, subject to providing evidence thereof. + +8.2 The Licensor's liability is limited to the commitments made under +this Agreement and shall not be incurred as a result of in particular: +(i) loss due the Licensee's total or partial failure to fulfill its +obligations, (ii) direct or consequential loss that is suffered by the +Licensee due to the use or performance of the Software, and (iii) more +generally, any consequential loss. In particular the Parties expressly +agree that any or all pecuniary or business loss (i.e. 
loss of data, +loss of profits, operating loss, loss of customers or orders, +opportunity cost, any disturbance to business activities) or any or all +legal proceedings instituted against the Licensee by a third party, +shall constitute consequential loss and shall not provide entitlement to +any or all compensation from the Licensor. + + + Article 9 - WARRANTY + +9.1 The Licensee acknowledges that the scientific and technical +state-of-the-art when the Software was distributed did not enable all +possible uses to be tested and verified, nor for the presence of +possible defects to be detected. In this respect, the Licensee's +attention has been drawn to the risks associated with loading, using, +modifying and/or developing and reproducing the Software which are +reserved for experienced users. + +The Licensee shall be responsible for verifying, by any or all means, +the suitability of the product for its requirements, its good working +order, and for ensuring that it shall not cause damage to either persons +or properties. + +9.2 The Licensor hereby represents, in good faith, that it is entitled +to grant all the rights over the Software (including in particular the +rights set forth in Article 5). + +9.3 The Licensee acknowledges that the Software is supplied "as is" by +the Licensor without any other express or tacit warranty, other than +that provided for in Article 9.2 and, in particular, without any warranty +as to its commercial value, its secured, safe, innovative or relevant +nature. + +Specifically, the Licensor does not warrant that the Software is free +from any error, that it will operate without interruption, that it will +be compatible with the Licensee's own equipment and software +configuration, nor that it will meet the Licensee's requirements. + +9.4 The Licensor does not either expressly or tacitly warrant that the +Software does not infringe any third party intellectual property right +relating to a patent, software or any other property right. Therefore, +the Licensor disclaims any and all liability towards the Licensee +arising out of any or all proceedings for infringement that may be +instituted in respect of the use, modification and redistribution of the +Software. Nevertheless, should such proceedings be instituted against +the Licensee, the Licensor shall provide it with technical and legal +assistance for its defense. Such technical and legal assistance shall be +decided on a case-by-case basis between the relevant Licensor and the +Licensee pursuant to a memorandum of understanding. The Licensor +disclaims any and all liability as regards the Licensee's use of the +name of the Software. No warranty is given as regards the existence of +prior rights over the name of the Software or as regards the existence +of a trademark. + + + Article 10 - TERMINATION + +10.1 In the event of a breach by the Licensee of its obligations +hereunder, the Licensor may automatically terminate this Agreement +thirty (30) days after notice has been sent to the Licensee and has +remained ineffective. + +10.2 A Licensee whose Agreement is terminated shall no longer be +authorized to use, modify or distribute the Software. However, any +licenses that it may have granted prior to termination of the Agreement +shall remain valid subject to their having been granted in compliance +with the terms and conditions hereof. 
+ + + Article 11 - MISCELLANEOUS + + + 11.1 EXCUSABLE EVENTS + +Neither Party shall be liable for any or all delay, or failure to +perform the Agreement, that may be attributable to an event of force +majeure, an act of God or an outside cause, such as defective +functioning or interruptions of the electricity or telecommunications +networks, network paralysis following a virus attack, intervention by +government authorities, natural disasters, water damage, earthquakes, +fire, explosions, strikes and labor unrest, war, etc. + +11.2 Any failure by either Party, on one or more occasions, to invoke +one or more of the provisions hereof, shall under no circumstances be +interpreted as being a waiver by the interested Party of its right to +invoke said provision(s) subsequently. + +11.3 The Agreement cancels and replaces any or all previous agreements, +whether written or oral, between the Parties and having the same +purpose, and constitutes the entirety of the agreement between said +Parties concerning said purpose. No supplement or modification to the +terms and conditions hereof shall be effective as between the Parties +unless it is made in writing and signed by their duly authorized +representatives. + +11.4 In the event that one or more of the provisions hereof were to +conflict with a current or future applicable act or legislative text, +said act or legislative text shall prevail, and the Parties shall make +the necessary amendments so as to comply with said act or legislative +text. All other provisions shall remain effective. Similarly, invalidity +of a provision of the Agreement, for any reason whatsoever, shall not +cause the Agreement as a whole to be invalid. + + + 11.5 LANGUAGE + +The Agreement is drafted in both French and English and both versions +are deemed authentic. + + + Article 12 - NEW VERSIONS OF THE AGREEMENT + +12.1 Any person is authorized to duplicate and distribute copies of this +Agreement. + +12.2 So as to ensure coherence, the wording of this Agreement is +protected and may only be modified by the authors of the License, who +reserve the right to periodically publish updates or new versions of the +Agreement, each with a separate number. These subsequent versions may +address new issues encountered by Free Software. + +12.3 Any Software distributed under a given version of the Agreement may +only be subsequently distributed under the same version of the Agreement +or a subsequent version, subject to the provisions of Article 5.3.4. + + + Article 13 - GOVERNING LAW AND JURISDICTION + +13.1 The Agreement is governed by French law. The Parties agree to +endeavor to seek an amicable solution to any disagreements or disputes +that may arise during the performance of the Agreement. + +13.2 Failing an amicable solution within two (2) months as from their +occurrence, and unless emergency proceedings are necessary, the +disagreements or disputes shall be referred to the Paris Courts having +jurisdiction, by the more diligent Party. + + +Version 2.0 dated 2006-09-05. diff --git a/modules/dnns_easily_fooled/sferes/README.md b/modules/dnns_easily_fooled/sferes/README.md new file mode 100644 index 000000000..0f108028f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/README.md @@ -0,0 +1,107 @@ +You can access this repo with SSH or with HTTPS. + +sferes2 +======= + +Sferes2 is a high-performance, lightweight, generic C++ framework for evolutionary computation. + +**If you use this software in an academic article, please cite:** + +Mouret, J.-B. and Doncieux, S. 
(2010). SFERESv2: Evolvin' in the Multi-Core World. _Proc. of Congress on Evolutionary Computation (CEC)_ Pages 4079--4086.
+
+The article is available here: http://www.isir.upmc.fr/files/2010ACTI1524.pdf
+
+@INPROCEEDINGS{Mouret2010,
+  AUTHOR = {Mouret, J.-B. and Doncieux, S.},
+  TITLE = {{SFERES}v2: Evolvin' in the Multi-Core World},
+  YEAR = {2010},
+  BOOKTITLE = {Proc. of Congress on Evolutionary Computation (CEC)},
+  PAGES = {4079--4086}
+}
+
+Documentation (including instructions for compilation)
+-------------
+
+We are in the process of porting the documentation to the github wiki: https://github.com/jbmouret/sferes2/wiki
+
+Optional modules
+---------------
+- evolvable neural networks: https://github.com/jbmouret/nn2
+- khepera-like simulator: https://github.com/jbmouret/fastsim
+
+
+Design
+-----
+The following choices were made in the initial design:
+- use of modern C++ techniques (template-based programming) to employ object-oriented programming without the cost of virtual functions;
+- use of Intel TBB to take full advantage of multicore and SMP systems;
+- use of boost libraries when it's useful (shared_ptr, serialization, filesystem, test,...);
+- use of MPI to distribute the computational cost on clusters;
+- a full set of unit tests;
+- no configuration file: a fully optimized executable is built for each particular experiment.
+
+Sferes2 is extended via modules and experiments.
+
+Sferes2 works on most Unix systems (in particular, GNU/Linux and OSX). It successfully compiles with gcc, clang and icc.
+
+
+Authors
+-------
+- Jean-Baptiste Mouret mouret@isir.upmc.fr: main author and maintainer
+- Stephane Doncieux doncieux@isir.upmc.fr
+- Paul Tonelli tonelli@isir.upmc.fr (documentation)
+- Many members of ISIR (http://isir.upmc.fr)
+
+Academic papers that used Sferes2:
+-----------------------------------
+*If you used Sferes2 in an academic paper, please send us an e-mail (mouret@isir.upmc.fr) so that we can add it here!*
+
+(you can find a pdf for most of these publications on http://scholar.google.com).
+
+### 2014
+- Lesaint, F., Sigaud, O., Clark, J. J., Flagel, S. B., & Khamassi, M. (2014). Experimental predictions drawn from a computational model of sign-trackers and goal-trackers. Journal of Physiology-Paris.
+- Lesaint, F., Sigaud, O., Flagel, S. B., Robinson, T. E., & Khamassi, M. (2014). Modelling Individual Differences in the Form of Pavlovian Conditioned Approach Responses: A Dual Learning Systems Approach with Factored Representations. PLoS computational biology, 10(2), e1003466.
+- Shrouf, F., Ordieres-Meré, J., García-Sánchez, A., & Ortega-Mier, M. (2014). Optimizing the production scheduling of a single machine to minimize total energy consumption costs. Journal of Cleaner Production, 67, 197-207.
+- Huizinga, J., Mouret, J. B., & Clune, J. (2014). Evolving Neural Networks That Are Both Modular and Regular: HyperNeat Plus the Connection Cost Technique. In Proceedings of GECCO (pp. 1-8).
+- Li, J., Storie, J., & Clune, J. (2014). Encouraging Creative Thinking in Robots Improves Their Ability to Solve Challenging Problems. Proceedings of GECCO (pp 1-8)
+- Tarapore, D. and Mouret, J.-B. (2014). Comparing the evolvability of generative encoding schemes.
+Artificial Life 14: Proceedings of the Fourteenth International Conference on the Synthesis and Simulation of Living Systems, MIT Press, publisher. Pages 1-8.
+
+
+### 2013
+- Koos, S. and Cully, A. and Mouret, J.-B. (2013). Fast Damage Recovery in Robotics with the T-Resilience Algorithm.
International Journal of Robotics Research. Vol 32 No 14 Pages 1700-1723. +- Tonelli, P. and Mouret, J.-B. (2013). On the Relationships between Generative Encodings, Regularity, and Learning Abilities when Evolving Plastic Artificial Neural Networks. PLoS One. Vol 8 No 11 Pages e79138 +- Clune*, J. and Mouret, J.-B. and Lipson, H. (2013). The evolutionary origins of modularity. Proceedings of the Royal Society B. Vol 280 (J. Clune and J.-B. Mouret contributed equally to this work) Pages 20122863 +- Koos, S. and Mouret, J.-B. and Doncieux, S. (2013). The Transferability Approach: Crossing the Reality Gap in Evolutionary Robotics. IEEE Transactions on Evolutionary Computation. Vol 17 No 1 Pages 122 - 145 +- Doncieux, S. and Mouret, J.B. (2013). Behavioral Diversity with Multiple Behavioral Distances. Proc. of IEEE Congress on Evolutionary Computation, 2013 (CEC 2013). Pages 1-8 +- Cully, A. and Mouret, J.-B. (2013). Behavioral Repertoire Learning in Robotics. Genetic and Evolutionary Computation Conference (GECCO). Pages 175-182. +- Doncieux, S. (2013). Transfer Learning for Direct Policy Search: A Reward Shaping Approach. Proceedings of ICDL-EpiRob conference. Pages 1-6. + +### 2012 +- Mouret, J.-B. and Doncieux, S. (2012). Encouraging Behavioral Diversity in Evolutionary Robotics: an Empirical Study. Evolutionary Computation. Vol 20 No 1 Pages 91-133. +- Ollion, Charles and Doncieux, Stéphane (2012). Towards Behavioral Consistency in Neuroevolution. From Animals to Animats: Proceedings of the 12th International Conference on Adaptive Behaviour (SAB 2012), Springer, publisher. Pages 1-10. +- Ollion, C. and Pinville, T. and Doncieux, S. (2012). With a little help from selection pressures: evolution of memory in robot controllers. Proc. Alife XIII. Pages 1-8. + +### 2011 +- Rubrecht, S. and Singla, E. and Padois, V. and Bidaud, P. and de Broissia, M. (2011). Evolutionary design of a robotic manipulator for a highly constrained environment. Studies in Computational Intelligence, New Horizons in Evolutionary Robotics, Springer, publisher. Vol 341 Pages 109-121. +- Doncieux, S. and Hamdaoui, M. (2011). Evolutionary Algorithms to Analyse and Design a Controller for a Flapping Wings Aircraft. New Horizons in Evolutionary Robotics Extended Contributions from the 2009 EvoDeRob Workshop, Springer, publisher. Pages 67--83. +- Mouret, J.-B. (2011). Novelty-based Multiobjectivization. New Horizons in Evolutionary Robotics: Extended Contributions from the 2009 EvoDeRob Workshop, Springer, publisher. Pages 139--154. +- Pinville, T. and Koos, S. and Mouret, J-B. and Doncieux, S. (2011). How to Promote Generalisation in Evolutionary Robotics: the ProGAb Approach. GECCO'11: Proceedings of the 13th annual conference on Genetic and evolutionary computation ACM, publisher . Pages 259--266. +- Koos, S. and Mouret, J-B. (2011). Online Discovery of Locomotion Modes for Wheel-Legged Hybrid Robots: a Transferability-based Approach. Proceedings of CLAWAR, World Scientific Publishing Co., publisher. Pages 70-77. +- Tonelli, P. and Mouret, J.-B. (2011). On the Relationships between Synaptic Plasticity and Generative Systems. Proceedings of the 13th Annual Conference on Genetic and Evolutionary Computation. Pages 1531--1538. (Best paper of the Generative and Developmental Systems (GDS) track). +- Terekhov, A.V. and Mouret, J.-B. and Grand, C. (2011). Stochastic optimization of a chain sliding mode controller for the mobile robot maneuvering. Proceedings of IEEE / IROS Int. Conf. on Robots and Intelligents Systems. 
Pages 4360 - 4365 + +### 2010 +- Mouret, J.-B. and Doncieux, S. and Girard, B. (2010). Importing the Computational Neuroscience Toolbox into Neuro-Evolution---Application to Basal Ganglia. GECCO'10: Proceedings of the 12th annual conference on Genetic and evolutionary computation ACM, publisher . Pages 587--594. +- Koos, S. and Mouret, J.-B. and Doncieux, S. (2010). Crossing the Reality Gap in Evolutionary Robotics by Promoting Transferable Controllers. GECCO'10: Proceedings of the 12th annual conference on Genetic and evolutionary computation ACM, publisher . Pages 119--126. +- Doncieux, S. and Mouret, J.-B. (2010). Behavioral diversity measures for Evolutionary Robotics. WCCI 2010 IEEE World Congress on Computational Intelligence, Congress on Evolutionary Computation (CEC). Pages 1303--1310. +- Terekhov, A.V. and Mouret, J.-B. and Grand, C. (2010). Stochastic optimization of a neural network-based controller for aggressive maneuvers on loose surfaces. Proceedings of IEEE / IROS Int. Conf. on Robots and Intelligents Systems. Pages 4782 - 4787 +- Terekhov, A.V and Mouret, J.-B. and Grand, C (2010). Stochastic multi-objective optimization for aggressive maneuver trajectory planning on loose surface. +Proceedings of IFAC: the 7th Symposium on Intelligent Autonomous Vehicles. Pages 1-6 +- Liénard, J. and Guillot, A. and Girard, B. (2010). Multi-Objective Evolutionary Algorithms to Investigate Neurocomputational Issues : The Case Study of Basal Ganglia Models. From animals to animats 11, Springer, publisher. Vol 6226 Pages 597--606 + +### 2009 +- Koos, S. and Mouret, J.-B. and Doncieux, S. (2009). Automatic system identification based on coevolution of models and tests. IEEE Congress on Evolutionary Computation, 2009 (CEC 2009). Pages 560--567 +- Mouret, J.-B. and Doncieux, S. (2009). Evolving modular neural-networks through exaptation. IEEE Congress on Evolutionary Computation, 2009 (CEC 2009). Pages 1570--1577. (Best student paper award) +- Mouret, J.-B. and Doncieux, S. (2009). Overcoming the bootstrap problem in evolutionary robotics using behavioral diversity. IEEE Congress on Evolutionary Computation, 2009 (CEC 2009). Pages 1161 - 1168 +- Mouret, J.-B. and Doncieux, S. (2009). Using Behavioral Exploration Objectives to Solve Deceptive Problems in Neuro-evolution. GECCO'09: Proceedings of the 11th annual conference on Genetic and evolutionary computation , ACM, publisher. Pages 627--634. diff --git a/modules/dnns_easily_fooled/sferes/build.sh b/modules/dnns_easily_fooled/sferes/build.sh new file mode 100755 index 000000000..6f7c77b77 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/build.sh @@ -0,0 +1,55 @@ +#!/bin/bash +home=$(echo ~) + +quit=0 + +# Remove the build folder +rm -rf ./build +echo "Build folder removed." + +# Check the building folder, either on local or Moran +if [ "$home" == "/home/anh" ] +then + echo "Configuring sferes for local.." + echo "..." + ./waf clean + ./waf distclean + #./waf configure --boost-include=/home/anh/src/sferes/include --boost-lib=/home/anh/src/sferes/lib --eigen3=/home/anh/src/sferes/include --mpi=/home/anh/openmpi + ./waf configure --boost-include=/home/anh/src/sferes/include --boost-lib=/home/anh/src/sferes/lib --eigen3=/home/anh/src/sferes/include + + quit=1 + +else + if [ "$home" == "/home/anguyen8" ] + then + echo "Configuring sferes for Moran.." + echo "..." 
+ ./waf clean + ./waf distclean + + # TBB + # ./waf configure --boost-include=/project/RIISVis/anguyen8/sferes/include/ --boost-libs=/project/RIISVis/anguyen8/sferes/lib/ --eigen3=/home/anguyen8/local/include --mpi=/apps/OPENMPI/gnu/4.8.2/1.6.5 --tbb=/home/anguyen8/sferes --libs=/home/anguyen8/local/lib + + # MPI (No TBB) + ./waf configure --boost-include=/project/RIISVis/anguyen8/sferes/include/ --boost-libs=/project/RIISVis/anguyen8/sferes/lib/ --eigen3=/home/anguyen8/local/include --mpi=/apps/OPENMPI/gnu/4.8.2/1.6.5 --libs=/home/anguyen8/local/lib + + quit=1 + + else + echo "Unknown environment. Building stopped." + fi +fi + +if [ "$quit" -eq "1" ] +then + echo "Building sferes.." + echo "..." + echo "..." + ./waf build + + echo "Building exp/images.." + echo "..." + echo "..." + ./waf --exp images +fi + diff --git a/modules/dnns_easily_fooled/sferes/contributors.txt b/modules/dnns_easily_fooled/sferes/contributors.txt new file mode 100644 index 000000000..341bd2991 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/contributors.txt @@ -0,0 +1,3 @@ +Anh Nguyen + +Evolving AI Lab diff --git a/modules/dnns_easily_fooled/sferes/eigen3.py b/modules/dnns_easily_fooled/sferes/eigen3.py new file mode 100644 index 000000000..19787cea2 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/eigen3.py @@ -0,0 +1,39 @@ +#! /usr/bin/env python +# encoding: utf-8 +# JB Mouret - 2009 + +""" +Quick n dirty eigen3 detection +""" + +import os, glob, types +import Options, Configure + +def detect_eigen3(conf): + env = conf.env + opt = Options.options + + conf.env['LIB_EIGEN3'] = '' + conf.env['EIGEN3_FOUND'] = False + if Options.options.no_eigen3: + return 0 + if Options.options.eigen3: + conf.env['CPPPATH_EIGEN3'] = [Options.options.eigen3] + conf.env['LIBPATH_EIGEN3'] = [Options.options.eigen3] + else: + conf.env['CPPPATH_EIGEN3'] = ['/usr/include/eigen3', '/usr/local/include/eigen3', '/usr/include', '/usr/local/include'] + conf.env['LIBPATH_EIGEN3'] = ['/usr/lib', '/usr/local/lib'] + + res = Configure.find_file('Eigen/Core', conf.env['CPPPATH_EIGEN3']) + conf.check_message('header','Eigen/Core', (res != '') , res) + if (res == '') : + return 0 + conf.env['EIGEN3_FOUND'] = True + return 1 + +def detect(conf): + return detect_eigen3(conf) + +def set_options(opt): + opt.add_option('--eigen3', type='string', help='path to eigen3', dest='eigen3') + opt.add_option('--no-eigen3', type='string', help='disable eigen3', dest='no_eigen3') diff --git a/modules/dnns_easily_fooled/sferes/examples/ex_ea.cpp b/modules/dnns_easily_fooled/sferes/examples/ex_ea.cpp new file mode 100644 index 000000000..8aa08b48f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/ex_ea.cpp @@ -0,0 +1,121 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct evo_float + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation rate of the real-valued vector + SFERES_CONST float mutation_rate = 0.1f; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + struct pop + { + // size of the population + SFERES_CONST unsigned size = 200; + // number 
of generations + SFERES_CONST unsigned nb_gen = 2000; + // how often should the result file be written (here, each 5 + // generation) + SFERES_CONST int dump_period = 5; + // how many individuals should be created during the random + // generation process? + SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; +}; + +SFERES_FITNESS(FitTest, sferes::fit::Fitness) +{ + public: + // indiv will have the type defined in the main (phen_t) + template + void eval(const Indiv& ind) + { + float v = 0; + for (unsigned i = 0; i < ind.size(); ++i) + { + float p = ind.data(i); + v += p * p * p * p; + } + this->_value = -v; + } +}; + + + +int main(int argc, char **argv) +{ + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef FitTest fit_t; + // We define the genotype. Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + typedef gen::EvoFloat<10, Params> gen_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. + typedef phen::Parameters phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). + typedef eval::Eval eval_t; + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. + typedef boost::fusion::vector, stat::MeanFit > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. + typedef ea::RankSimple ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the comannd line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). 
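+ // NOTE (illustrative only; the paths below are hypothetical and depend on how waf
+ // was configured): with the default build layout the compiled example usually ends
+ // up under build/default/examples/, so a fresh run and a reload of a previously
+ // dumped generation file would look roughly like
+ //   ./build/default/examples/ex_ea
+ //   ./build/default/examples/ex_ea --load res/<exp_dir>/gen_100 -o best.txt -s 0
+ // gen_* files are written every dump_period generations; when loading, -o selects
+ // the output file and -s the statistics index (see also the option handling in
+ // exp/images/continue_run/global_options.hpp later in this module).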
+ run_ea(argc, argv, ea); + // + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/examples/ex_ea.json b/modules/dnns_easily_fooled/sferes/examples/ex_ea.json new file mode 100644 index 000000000..869cd8482 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/ex_ea.json @@ -0,0 +1,11 @@ +{ + "machines" : + { + "localhost" : 3, + "fortaleza" : 2 + }, + "nb_runs": 3, + "exp" : "examples/ex_ea", + "dir" : "res/ex1", + "debug" : 0 +} \ No newline at end of file diff --git a/modules/dnns_easily_fooled/sferes/examples/ex_ea_mpi.cpp b/modules/dnns_easily_fooled/sferes/examples/ex_ea_mpi.cpp new file mode 100644 index 000000000..eeebbbf6a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/ex_ea_mpi.cpp @@ -0,0 +1,93 @@ +#include + +#ifdef MPI_ENABLED +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct evo_float + { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop + { + SFERES_CONST unsigned size = 200; + SFERES_CONST unsigned nb_gen = 40; + SFERES_CONST int dump_period = 5; + SFERES_CONST int initial_aleat = 1; + SFERES_CONST float coeff = 1.1f; + SFERES_CONST float keep_rate = 0.6f; + }; + struct parameters + { + SFERES_CONST float min = -10.0f; + SFERES_CONST float max = 10.0f; + }; +}; + +SFERES_FITNESS(FitTest, sferes::fit::Fitness) +{ + public: + FitTest() + {} + template + void eval(const Indiv& ind) + { + float v = 0; + for (unsigned i = 0; i < ind.size(); ++i) + { + float p = ind.data(i); + v += p * p * p * p; + } + // slow down to simulate a slow fitness + usleep(1e4); + this->_value = -v; + } +}; + + + +int main(int argc, char **argv) +{ + dbg::out(dbg::info)<<"running ex_ea ... 
try --help for options (verbose)"< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Mpi eval_t; + typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::RankSimple ea_t; + ea_t ea; + + run_ea(argc, argv, ea); + + std::cout<<"==> best fitness ="<().best()->fit().value()< mean fitness ="<().mean()< +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct evo_float + { + + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop + { + SFERES_CONST unsigned size = 200; + SFERES_CONST int dump_period = 50; + SFERES_ARRAY(float, eps, 0.0075f, 0.0075f); + SFERES_ARRAY(float, min_fit, 0.0f, 0.0f); + SFERES_CONST size_t grain = size / 4; + SFERES_CONST unsigned nb_gen = 2000; + }; + + struct parameters + { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + + +template +float _g(const Indiv &ind) +{ + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) +{ + public: + FitZDT2() {} + template + void eval(Indiv& ind) + { + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } +}; + + + + +int main(int argc, char **argv) +{ + std::cout<<"running "< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Eval eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::EpsMOEA ea_t; + ea_t ea; + + run_ea(argc, argv, ea); + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/examples/ex_nsga2.cpp b/modules/dnns_easily_fooled/sferes/examples/ex_nsga2.cpp new file mode 100644 index 000000000..b1e8bfce2 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/ex_nsga2.cpp @@ -0,0 +1,87 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct evo_float + { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop + { + SFERES_CONST unsigned size = 300; + SFERES_CONST unsigned nb_gen = 500; + SFERES_CONST int dump_period = 50; + SFERES_CONST int initial_aleat = 1; + }; + struct parameters + { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + + +template +float _g(const Indiv &ind) +{ + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) +{ + public: + FitZDT2() {} + template + void eval(Indiv& ind) + { + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } +}; + + + + +int main(int argc, char **argv) +{ + 
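+  // NOTE: FitZDT2 above implements the standard ZDT2 benchmark on 30 variables:
+  //   f1 = x_1,   g = 1 + (9/29) * sum_{i=2}^{30} x_i,   f2 = g * (1 - (f1/g)^2).
+  // Both objectives are negated because sferes2 maximizes fitness, so Nsga2 below
+  // approximates the ZDT2 Pareto front with a population of 300 over 500
+  // generations (see Params::pop above).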
std::cout<<"running "< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Eval eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::Nsga2 ea_t; + ea_t ea; + + run_ea(argc, argv, ea); + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/examples/qsub.json b/modules/dnns_easily_fooled/sferes/examples/qsub.json new file mode 100644 index 000000000..e3180f80c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/qsub.json @@ -0,0 +1,8 @@ +{ + "email" : "mouret@isir.upmc.fr", + "wall_time" : "24:00:00", + "nb_runs": 3, + "bin_dir": "/home/mouret/svn/sferes2/trunk/build/default/examples/", + "res_dir": "/home/mouret/svn/sferes2/trunk/res/", + "exps" : ["ex_ea"] +} diff --git a/modules/dnns_easily_fooled/sferes/examples/wscript b/modules/dnns_easily_fooled/sferes/examples/wscript new file mode 100644 index 000000000..2a6ef812c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/examples/wscript @@ -0,0 +1,34 @@ +#! /usr/bin/env python +def build(bld): + return None + # ex_ea + #obj = bld.new_task_gen('cxx', 'program') + #obj.source = 'ex_ea.cpp' + #obj.includes = '../' + #obj.target = 'ex_ea' + #obj.uselib_local = 'sferes2' + #obj.uselib = 'TBB BOOST BOOST_UNIT_TEST_FRAMEWORK EIGEN3' + + ## ex_ea + #obj = bld.new_task_gen('cxx', 'program') + #obj.source = 'ex_ea_mpi.cpp' + #obj.includes = '../' + #obj.target = 'ex_ea_mpi' + #obj.uselib_local = 'sferes2' + #obj.uselib = 'TBB BOOST BOOST_UNIT_TEST_FRAMEWORK EIGEN3' + + ## ex_nsga2 + #obj = bld.new_task_gen('cxx', 'program') + #obj.source = 'ex_nsga2.cpp' + #obj.includes = '../' + #obj.target = 'ex_nsga2' + #obj.uselib_local = 'sferes2' + #obj.uselib = 'TBB BOOST BOOST_UNIT_TEST_FRAMEWORK EIGEN3' + + ## ex_eps_moea + #obj = bld.new_task_gen('cxx', 'program') + #obj.source = 'ex_eps_moea.cpp' + #obj.includes = '../' + #obj.target = 'ex_eps_moea' + #obj.uselib_local = 'sferes2' + #obj.uselib = 'TBB BOOST BOOST_UNIT_TEST_FRAMEWORK EIGEN3' diff --git a/modules/dnns_easily_fooled/sferes/exp/example/example.cpp b/modules/dnns_easily_fooled/sferes/exp/example/example.cpp new file mode 100644 index 000000000..d1d1ab78a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/example/example.cpp @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct evo_float + { + + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop + { + SFERES_CONST unsigned size = 300; + SFERES_CONST unsigned nb_gen = 500; + SFERES_CONST int dump_period = 50; + SFERES_CONST int initial_aleat = 1; + }; + struct parameters + { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + + +template +float _g(const Indiv &ind) +{ + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) +{ + public: + FitZDT2() {} + template + void eval(Indiv& ind) + { + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } +}; + + + + +int main(int argc, char **argv) +{ + 
std::cout<<"running "< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Eval eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::Nsga2 ea_t; + ea_t ea; + + run_ea(argc, argv, ea); + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/example/wscript b/modules/dnns_easily_fooled/sferes/exp/example/wscript new file mode 100644 index 000000000..722067f15 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/example/wscript @@ -0,0 +1,10 @@ + +#! /usr/bin/env python +def build(bld): + obj = bld.new_task_gen('cxx', 'program') + obj.source = 'example.cpp' + obj.includes = '. ../../' + obj.uselib_local = 'sferes2' + obj.uselib = '' + obj.target = 'example' + obj.uselib_local = 'sferes2' diff --git a/modules/dnns_easily_fooled/sferes/exp/images/build_wscript.sh b/modules/dnns_easily_fooled/sferes/exp/images/build_wscript.sh new file mode 100755 index 000000000..983523685 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/build_wscript.sh @@ -0,0 +1,22 @@ +#!/bin/bash +home=$(echo ~) +echo "You are building from: $home" + +# Check the building folder, either on local or Moran +if [ "$home" == "/home/anh" ] +then + echo "Enabled local settings.." + cp ./wscript.local ./wscript + + +else + if [ "$home" == "/home/anguyen8" ] + then + echo "Enabled Moran settings.." + cp ./wscript.moran ./wscript + else + echo "Unknown environment. Building stopped." + fi +fi + + diff --git a/modules/dnns_easily_fooled/sferes/exp/images/continue_run/continue_run.hpp b/modules/dnns_easily_fooled/sferes/exp/images/continue_run/continue_run.hpp new file mode 100644 index 000000000..d7b79f838 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/continue_run/continue_run.hpp @@ -0,0 +1,88 @@ +/* + * continue_run.hpp + * + * Created on: Aug 14, 2014 + * Author: joost + */ + +#ifndef CONTINUE_RUN_HPP_ +#define CONTINUE_RUN_HPP_ + +#include +#include + +#include + +#include "global_options.hpp" +#include + +namespace sferes +{ + namespace cont + { + + template + class Continuator + { + + public: + typedef std::vector pop_t; + + bool enabled() + { + return options::vm.count("continue"); + } + + pop_t getPopulationFromFile(EAType& ea) + { + ea.load(options::vm["continue"].as()); + + return boost::fusion::at_c(ea.stat()).getPopulation(); + } + + pop_t getPopulationFromFile(EAType& ea, const std::string& path_gen_file) + { + ea.load(path_gen_file); + + return boost::fusion::at_c(ea.stat()).getPopulation(); + } + + void run_with_current_population(EAType& ea, const std::string filename) + { + // Read the number of generation from gen file. 
Ex: gen_450 + int start = 0; + std::string gen_prefix("gen_"); + std::size_t pos = filename.rfind(gen_prefix) + gen_prefix.size(); + std::string gen_number = filename.substr(pos); + std::istringstream ss(gen_number); + ss >> start; + start++; + dbg::out(dbg::info, "continue") << "File name: " << filename << " number start: " << pos << " gen number: " << gen_number << " result: " << start << std::endl; + + // Similar to the run() function in + for (int _gen = start; _gen < Params::pop::nb_gen; ++_gen) + { + ea.setGen(_gen); + ea.epoch(); + ea.update_stats(); + if (_gen % Params::pop::dump_period == 0) + { + ea.write(); + } + } + + std::cout << "Finished all the runs.\n"; + exit(0); + } + + void run_with_current_population(EAType& ea) + { + const std::string filename = options::vm["continue"].as(); + run_with_current_population(ea, filename); + } + + }; + + } +} +#endif /* CONTINUE_RUN_HPP_ */ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/continue_run/global_options.hpp b/modules/dnns_easily_fooled/sferes/exp/images/continue_run/global_options.hpp new file mode 100644 index 000000000..533256f54 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/continue_run/global_options.hpp @@ -0,0 +1,120 @@ +/* + * global_options.hpp + * + * Created on: Aug 14, 2014 + * Author: Joost Huizinga + * + * Header file created to allow for custom command-line options to be added to an experiment. + * Requires you to replace run_ea with options::run_ea to works. + * Options can be added by: + * options::add()("option_name","description"); + */ + +#ifndef GLOBAL_OPTIONS_HPP_ +#define GLOBAL_OPTIONS_HPP_ + +#include +#include +#include + +#include +//#include + +namespace sferes +{ + namespace options + { + + boost::program_options::variables_map vm; + + template + static void run_ea ( int argc, char **argv, Ea& ea, + const boost::program_options::options_description& add_opts = + boost::program_options::options_description(), + bool init_rand = true ) + { + namespace po = boost::program_options; + std::cout << "sferes2 version: " << VERSION << std::endl; + if (init_rand) + { + time_t t = time(0) + ::getpid(); + std::cout << "seed: " << t << std::endl; + srand(t); + } + po::options_description desc("Allowed sferes2 options"); + desc.add(add_opts); + desc.add_options()("help,h", "produce help message")("stat,s", + po::value(), "statistic number")("out,o", + po::value(), "output file")("number,n", po::value(), + "number in stat")("load,l", po::value(), + "load a result file")("verbose,v", + po::value >()->multitoken(), + "verbose output, available default streams : all, ea, fit, phen, trace"); + + // po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) + { + std::cout << desc << std::endl; + return; + } + if (vm.count("verbose")) + { + dbg::init(); + std::vector < std::string > streams = vm["verbose"].as< + std::vector >(); + attach_ostream(dbg::warning, std::cout); + attach_ostream(dbg::error, std::cerr); + attach_ostream(dbg::info, std::cout); + bool all = std::find(streams.begin(), streams.end(), "all") + != streams.end(); + bool trace = std::find(streams.begin(), streams.end(), "trace") + != streams.end(); + if (all) + { + streams.push_back("ea"); + streams.push_back("fit"); + streams.push_back("phen"); + streams.push_back("eval"); + } + BOOST_FOREACH(const std::string& s, streams){ + dbg::enable(dbg::all, s.c_str(), true); + dbg::attach_ostream(dbg::info, s.c_str(), std::cout); + if (trace) + 
dbg::attach_ostream(dbg::tracing, s.c_str(), std::cout); + } + if (trace) + attach_ostream(dbg::tracing, std::cout); + } + + parallel::init(); + if (vm.count("load")) + { + ea.load(vm["load"].as()); + + if (!vm.count("out")) + { + std::cerr << "You must specifiy an out file" << std::endl; + return; + } + else + { + int stat = 0; + int n = 0; + if (vm.count("stat")) + stat = vm["stat"].as(); + if (vm.count("number")) + n = vm["number"].as(); + std::ofstream ofs(vm["out"].as().c_str()); + ea.show_stat(stat, ofs, n); + } + } + else + ea.run(); + } + + } +} +#endif /* GLOBAL_OPTIONS_HPP_ */ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_images.hpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_images.hpp new file mode 100644 index 000000000..ac28e6d85 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_images.hpp @@ -0,0 +1,32 @@ +#ifndef DEEP_LEARNING_IMAGES_HPP +#define DEEP_LEARNING_IMAGES_HPP + +#include "settings.h" +#include + +using namespace sferes; + +// Parameters required by Caffe separated from those introduced by Sferes +struct ParamsCaffe +{ + struct image + { + // Size of the square image 256x256 + SFERES_CONST int size = 256; + SFERES_CONST int crop_size = 227; + SFERES_CONST bool use_crops = true; + + SFERES_CONST bool color = true; // true: color, false: grayscale images + + // GPU configurations + SFERES_CONST bool use_gpu = false; + + // GPU on Moran can only handle max of 512 images in a batch at a time. + SFERES_CONST int batch = 1; + SFERES_CONST int num_categories = 1000; // ILSVR2012 ImageNet has 1000 categories + static int category_id; + SFERES_CONST bool record_lineage = false; // Flag to save the parent's assigned class + }; +}; + +#endif /* DEEP_LEARNING_IMAGES_HPP */ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images.cpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images.cpp new file mode 100644 index 000000000..ad2e66de9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images.cpp @@ -0,0 +1,232 @@ +#include "dl_images.hpp" + +#include +#include +#include +#include + +#include "stat/best_fit_map_image.hpp" +#include "stat/stat_map_image.hpp" + +#include +#include + + +// Evolutionary algorithms -------------------------------- +#include "fit/fit_map_deep_learning.hpp" +#include +#include +#include "phen/phen_color_image.hpp" + +#include +// Caffe ------------------------------------------------- + +#include +#include "eval/mpi_parallel.hpp" // MPI +#include "continue_run/continue_run.hpp" // MPI + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct cont + { + static const int getPopIndex = 0; + }; + + struct log + { + SFERES_CONST bool best_image = false; + }; + + struct ea + { + SFERES_CONST size_t res_x = 1; // 256; + SFERES_CONST size_t res_y = 1000; // 256; + }; + + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 3; // Red, Green, Blue + + SFERES_CONST float m_rate_add_conn = 0.5f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 0.5f; + SFERES_CONST float m_rate_add_neuron = 0.5f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST init_t init = ff; + }; + + struct evo_float + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation 
rate of the real-valued vector + SFERES_CONST float mutation_rate = 0.1f; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + + struct pop + { + //number of initial random points + static const size_t init_size = 400; // 1000 + // size of the population + SFERES_CONST unsigned size = 400; //200; + // number of generations + SFERES_CONST unsigned nb_gen = 5001; //10,000; + // how often should the result file be written (here, each 5 + // generation) + static int dump_period;// 5; + // how many individuals should be created during the random + // generation process? + SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, nn::cppn::sine, nn::cppn::sigmoid, nn::cppn::gaussian, nn::cppn::linear); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; + + // Specific settings for MNIST database of grayscale + struct image : ParamsCaffe::image + { + static const std::string model_definition; + static const std::string pretrained_model; + }; + +}; + +// Initialize the parameter files for Caffe network. +#ifdef LOCAL_RUN + +const std::string Params::image::model_definition = "/home/anh/src/model/imagenet_deploy_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/home/anh/src/model/caffe_reference_imagenet_model"; + +#else + +const std::string Params::image::model_definition = "/project/EvolvingAI/anguyen8/model/imagenet_deploy_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/project/EvolvingAI/anguyen8/model/caffe_reference_imagenet_model"; + +#endif + +int Params::pop::dump_period = 1000; + + +int main(int argc, char **argv) +{ + // Disable GLOG output from experiment and also Caffe + // Comment out for debugging + google::InitGoogleLogging(""); + google::SetStderrLogging(3); + + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef sferes::fit::FitMapDeepLearning fit_t; + // We define the genotype. Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + //typedef gen::EvoFloat<10, Params> gen_t; + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef gen::HyperNn cppn_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. 
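+ // NOTE (assumption, not verified against phen_color_image.hpp, which is included
+ // above but not reproduced here): given Params::dnn::nb_outputs = 3 and
+ // ParamsCaffe::image::size = 256, phen::ColorImage presumably evaluates the CPPN
+ // once per pixel and maps its three outputs to the R, G, B channels of a 256x256
+ // image, which FitMapDeepLearning then scores with the Caffe model configured in
+ // Params::image.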
+ typedef phen::ColorImage phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). +// typedef eval::Eval eval_t; + typedef eval::MpiParallel eval_t; // TBB + + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. +// typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef boost::fusion::vector, stat::BestFitMapImage > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. +// typedef ea::RankSimple ea_t; + typedef ea::MapElite ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the command line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). + + if (argc > 1) // if a number is provided on the command line + { + int randomSeed = atoi(argv[1]); + printf("randomSeed:%i\n", randomSeed); + srand(randomSeed); //set it as the random seed + + boost::program_options::options_description add_opts = + boost::program_options::options_description(); + + shared_ptr opt (new boost::program_options::option_description( + "continue,t", boost::program_options::value(), + "continue from the loaded file starting from the generation provided" + )); + + add_opts.add(opt); + + options::run_ea(argc, argv, ea, add_opts, false); + } + else + { + run_ea(argc, argv, ea); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_imagenet_direct_encoding.cpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_imagenet_direct_encoding.cpp new file mode 100644 index 000000000..5460cebcb --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_imagenet_direct_encoding.cpp @@ -0,0 +1,197 @@ +#include "dl_images.hpp" + +#include +#include +#include "gen/evo_float_image.hpp" +#include + +#include "stat/best_fit_map_image.hpp" +#include "stat/stat_map_image.hpp" + +#include +#include + + +// Evolutionary algorithms -------------------------------- +#include "fit/fit_map_deep_learning.hpp" +#include +#include +#include +#include "phen/phen_image_direct.hpp" + +#include +// Caffe ------------------------------------------------- + +#include +#include "eval/mpi_parallel.hpp" // MPI +#include "continue_run/continue_run.hpp" // MPI + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float_image; + +struct Params +{ + struct cont + { + static const int getPopIndex = 0; + }; + + struct log + { + SFERES_CONST bool best_image = false; + }; + + struct ea + { + SFERES_CONST size_t res_x = 1; // 256; + SFERES_CONST 
size_t res_y = 1000; // 256; + }; + + struct evo_float_image + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation rate of the real-valued vector + static float mutation_rate; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + + struct pop + { + //number of initial random points + SFERES_CONST size_t init_size = 200; // 1000 + // size of the population + SFERES_CONST unsigned size = 200; //200; + // number of generations + SFERES_CONST unsigned nb_gen = 5010; //10,000; + // how often should the result file be written (here, each 5 + // generation) + static int dump_period;// 5; + // how many individuals should be created during the random + // generation process? + SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; + + // Specific settings for MNIST database of grayscale + struct image : ParamsCaffe::image + { + static const std::string model_definition; + static const std::string pretrained_model; + }; + +}; + +// Initialize the parameter files for Caffe network. +#ifdef LOCAL_RUN + +const std::string Params::image::model_definition = "/home/anh/src/model/imagenet_deploy_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/home/anh/src/model/caffe_reference_imagenet_model"; + +#else + +const std::string Params::image::model_definition = "/project/EvolvingAI/anguyen8/model/imagenet_deploy_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/project/EvolvingAI/anguyen8/model/caffe_reference_imagenet_model"; + +#endif + +int Params::pop::dump_period = 1000; +float Params::evo_float_image::mutation_rate = 0.1f; + + +int main(int argc, char **argv) +{ + // Disable GLOG output from experiment and also Caffe + // Comment out for debugging + google::InitGoogleLogging(""); + google::SetStderrLogging(3); + + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef sferes::fit::FitMapDeepLearning fit_t; + // We define the genotype. Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + typedef gen::EvoFloatImage gen_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. + typedef phen::ImageDirect phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). 
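The evo_float_image block above selects the standard real-coded operators: polynomial mutation (sharpness eta_m) and SBX cross-over (eta_c), applied to genes bounded by the parameters struct. As a worked illustration, polynomial mutation of a single gene can be sketched as below; this is the textbook Deb and Agrawal operator with the bounds [-10, 10] taken from parameters as assumptions, not a transcription of the sferes evo_float code. Each gene of the vector is perturbed this way with probability mutation_rate.

    #include <algorithm>
    #include <cmath>
    #include <random>

    // Polynomial mutation of one gene x in [lo, hi].
    // Larger eta_m => smaller perturbations (eta_m = 15 in these Params).
    float polynomial_mutation(float x, float lo, float hi, float eta_m, std::mt19937& rng) {
        std::uniform_real_distribution<float> uni(0.0f, 1.0f);
        const float u = uni(rng);
        const float range = hi - lo;
        float delta;
        if (u < 0.5f) {
            const float d1 = (x - lo) / range;
            delta = std::pow(2.0f * u + (1.0f - 2.0f * u) * std::pow(1.0f - d1, eta_m + 1.0f),
                             1.0f / (eta_m + 1.0f)) - 1.0f;
        } else {
            const float d2 = (hi - x) / range;
            delta = 1.0f - std::pow(2.0f * (1.0f - u)
                                    + 2.0f * (u - 0.5f) * std::pow(1.0f - d2, eta_m + 1.0f),
                                    1.0f / (eta_m + 1.0f));
        }
        return std::max(lo, std::min(hi, x + delta * range));
    }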
+// typedef eval::Eval eval_t; + typedef eval::MpiParallel eval_t; // TBB + + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. +// typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef boost::fusion::vector, stat::BestFitMapImage > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. +// typedef ea::RankSimple ea_t; + typedef ea::MapElite ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the command line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). + + if (argc > 1) // if a number is provided on the command line + { + int randomSeed = atoi(argv[1]); + printf("randomSeed:%i\n", randomSeed); + srand(randomSeed); //set it as the random seed + + boost::program_options::options_description add_opts = + boost::program_options::options_description(); + + shared_ptr opt (new boost::program_options::option_description( + "continue,t", boost::program_options::value(), + "continue from the loaded file starting from the generation provided" + )); + + add_opts.add(opt); + + options::run_ea(argc, argv, ea, add_opts, false); + } + else + { + run_ea(argc, argv, ea); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist.cpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist.cpp new file mode 100644 index 000000000..968b5596e --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist.cpp @@ -0,0 +1,241 @@ +#include "dl_images.hpp" + +#include +#include +#include +#include + +#include "stat/best_fit_map_image.hpp" +#include "stat/stat_map_image.hpp" + +#include +#include + + +// Evolutionary algorithms -------------------------------- +#include "fit/fit_map_deep_learning.hpp" +#include +#include +#include "phen/phen_grayscale_image.hpp" + +#include +// Caffe ------------------------------------------------- + +#include +#include "eval/mpi_parallel.hpp" // MPI +#include "continue_run/continue_run.hpp" // MPI + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct cont + { + static const int getPopIndex = 0; + }; + + struct log + { + SFERES_CONST bool best_image = false; + }; + + struct ea + { + SFERES_CONST size_t res_x = 1; // 256; + SFERES_CONST size_t res_y = 10; // 256; + }; + + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 1; // Red, Green, Blue + + SFERES_CONST float m_rate_add_conn = 0.5f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 0.5f; + SFERES_CONST float m_rate_add_neuron = 0.5f; + SFERES_CONST float m_rate_del_neuron 
= 0.2f; + + SFERES_CONST init_t init = ff; + }; + + struct evo_float + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation rate of the real-valued vector + SFERES_CONST float mutation_rate = 0.1f; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + + struct pop + { + //number of initial random points + static const size_t init_size = 200; // 1000 + // size of the population + SFERES_CONST unsigned size = 200; //200; + // number of generations + SFERES_CONST unsigned nb_gen = 1001; //10,000; + // how often should the result file be written (here, each 5 + // generation) + static int dump_period;// 5; + // how many individuals should be created during the random + // generation process? + SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, nn::cppn::sine, nn::cppn::sigmoid, nn::cppn::gaussian, nn::cppn::linear); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; + + // Specific settings for MNIST database of grayscale + struct image : ParamsCaffe::image + { + // Size of the square image 256x256 + SFERES_CONST int size = 28; + SFERES_CONST bool use_crops = false; + SFERES_CONST bool color = false; // Grayscale + + SFERES_CONST int num_categories = 10; // MNIST has 10 categories + + static const std::string model_definition; + static const std::string pretrained_model; + + SFERES_CONST bool record_lineage = true; + }; + +}; + +// Initialize the parameter files for Caffe network. +#ifdef LOCAL_RUN + +const std::string Params::image::model_definition = "/home/anh/src/model/lenet_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/home/anh/src/model/lenet_iter_10000"; + +#else + +const std::string Params::image::model_definition = "/project/EvolvingAI/anguyen8/model/lenet_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/project/EvolvingAI/anguyen8/model/lenet_iter_10000"; + +#endif + +int Params::pop::dump_period = 100; + + +int main(int argc, char **argv) +{ + // Disable GLOG output from experiment and also Caffe + // Comment out for debugging + google::InitGoogleLogging(""); + google::SetStderrLogging(3); + + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef sferes::fit::FitMapDeepLearning fit_t; + // We define the genotype. 
Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + //typedef gen::EvoFloat<10, Params> gen_t; + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef gen::HyperNn cppn_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. + typedef phen::GrayscaleImage phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). +// typedef eval::Eval eval_t; + typedef eval::MpiParallel eval_t; // TBB + + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. +// typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef boost::fusion::vector, stat::BestFitMapImage > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. +// typedef ea::RankSimple ea_t; + typedef ea::MapElite ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the command line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). 
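Since ea::MapElite is the algorithm actually used here, with res_x = 1 and res_y equal to the number of classes (one archive cell per category), the core update rule can be summarised in a few lines: a newcomer replaces the incumbent of its cell only if the DNN classifies it into that class with higher confidence. The sketch below is a schematic of that rule under those assumptions, not the sferes ea::MapElite implementation.

    #include <vector>

    struct Candidate {
        int predicted_class = -1;  // behaviour descriptor: class the DNN predicts
        float confidence = 0.0f;   // fitness: DNN confidence for that class
        bool valid = false;
    };

    // One archive cell per class; a newcomer replaces the incumbent only if it
    // is more confidently classified as that class (the MAP-Elites update rule).
    void add_to_archive(std::vector<Candidate>& archive, const Candidate& c) {
        if (c.predicted_class < 0 ||
            c.predicted_class >= static_cast<int>(archive.size()))
            return;
        Candidate& cell = archive[c.predicted_class];
        if (!cell.valid || c.confidence > cell.confidence)
            cell = c;
    }

In the MNIST experiments the archive has 10 cells (res_y = 10); in the ImageNet one it has 1000 (res_y = 1000).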
+ + if (argc > 1) // if a number is provided on the command line + { + int randomSeed = atoi(argv[1]); + printf("randomSeed:%i\n", randomSeed); + srand(randomSeed); //set it as the random seed + + boost::program_options::options_description add_opts = + boost::program_options::options_description(); + + shared_ptr opt (new boost::program_options::option_description( + "continue,t", boost::program_options::value(), + "continue from the loaded file starting from the generation provided" + )); + + add_opts.add(opt); + + options::run_ea(argc, argv, ea, add_opts, false); + } + else + { + run_ea(argc, argv, ea); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist_direct_encoding.cpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist_direct_encoding.cpp new file mode 100644 index 000000000..dcb18c16e --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_mnist_direct_encoding.cpp @@ -0,0 +1,202 @@ +#include "dl_images.hpp" + +#include +#include +#include "gen/evo_float_image.hpp" +#include + +#include "stat/best_fit_map_image.hpp" +#include "stat/stat_map_image.hpp" + +#include +#include + + +// Evolutionary algorithms -------------------------------- +#include "fit/fit_map_deep_learning.hpp" +#include +#include +#include "phen/phen_grayscale_image_direct.hpp" + +#include +// Caffe ------------------------------------------------- + +#include +#include "eval/mpi_parallel.hpp" // MPI +#include "continue_run/continue_run.hpp" // MPI + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float_image; + +struct Params +{ + struct cont + { + static const int getPopIndex = 0; + }; + + struct log + { + SFERES_CONST bool best_image = false; + }; + + struct ea + { + SFERES_CONST size_t res_x = 1; // 256; + SFERES_CONST size_t res_y = 10; // 256; + }; + + struct evo_float_image + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation rate of the real-valued vector + SFERES_CONST float mutation_rate = 0.1f; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + + struct pop + { + //number of initial random points + static const size_t init_size = 200; // 1000 + // size of the population + SFERES_CONST unsigned size = 200; //200; + // number of generations + SFERES_CONST unsigned nb_gen = 1010; //10,000; + // how often should the result file be written (here, each 5 + // generation) + static int dump_period;// 5; + // how many individuals should be created during the random + // generation process? 
+ SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; + + // Specific settings for MNIST database of grayscale + struct image : ParamsCaffe::image + { + // Size of the square image 256x256 + SFERES_CONST int size = 28; + SFERES_CONST bool use_crops = false; + SFERES_CONST bool color = false; // Grayscale + + SFERES_CONST int num_categories = 10; // MNIST has 10 categories + + static const std::string model_definition; + static const std::string pretrained_model; + }; + +}; + +// Initialize the parameter files for Caffe network. +#ifdef LOCAL_RUN + +const std::string Params::image::model_definition = "/home/anh/src/model/lenet_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/home/anh/src/model/lenet_iter_10000"; + +#else + +const std::string Params::image::model_definition = "/project/EvolvingAI/anguyen8/model/lenet_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/project/EvolvingAI/anguyen8/model/lenet_iter_10000"; + +#endif + +int Params::pop::dump_period = 10; + + +int main(int argc, char **argv) +{ + // Disable GLOG output from experiment and also Caffe + // Comment out for debugging + google::InitGoogleLogging(""); + google::SetStderrLogging(3); + + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef sferes::fit::FitMapDeepLearning fit_t; + // We define the genotype. Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + typedef gen::EvoFloatImage gen_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. + typedef phen::GrayscaleImageDirect phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). +// typedef eval::Eval eval_t; + typedef eval::MpiParallel eval_t; // TBB + + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. +// typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef boost::fusion::vector, stat::BestFitMapImage > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. 
It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. +// typedef ea::RankSimple ea_t; + typedef ea::MapElite ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the command line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). + + if (argc > 1) // if a number is provided on the command line + { + int randomSeed = atoi(argv[1]); + printf("randomSeed:%i\n", randomSeed); + srand(randomSeed); //set it as the random seed + + boost::program_options::options_description add_opts = + boost::program_options::options_description(); + + shared_ptr opt (new boost::program_options::option_description( + "continue,t", boost::program_options::value(), + "continue from the loaded file starting from the generation provided" + )); + + add_opts.add(opt); + + options::run_ea(argc, argv, ea, add_opts, false); + } + else + { + run_ea(argc, argv, ea); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_test.cpp b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_test.cpp new file mode 100644 index 000000000..64faa8ed7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/dl_map_elites_images_test.cpp @@ -0,0 +1,237 @@ +#include "dl_images.hpp" + +#include +#include +#include +#include + +#include "stat/best_fit_map_image.hpp" +#include "stat/stat_map_image.hpp" + +#include +#include + + +// Evolutionary algorithms -------------------------------- +#include "fit/fit_map_deep_learning.hpp" +#include +#include +#include "phen/phen_color_image.hpp" + +#include +// Caffe ------------------------------------------------- + +#include +#include "eval/mpi_parallel.hpp" // MPI +#include "continue_run/continue_run.hpp" // MPI + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +struct Params +{ + struct cont + { + static const int getPopIndex = 0; + }; + + struct log + { + SFERES_CONST bool best_image = false; + }; + + struct ea + { + SFERES_CONST size_t res_x = 1; // 256; + SFERES_CONST size_t res_y = 10; // 256; + }; + + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 3; // Red, Green, Blue + + SFERES_CONST float m_rate_add_conn = 0.5f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 0.5f; + SFERES_CONST float m_rate_add_neuron = 0.5f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST init_t init = ff; + }; + + struct evo_float + { + // we choose the polynomial mutation type + SFERES_CONST mutation_t mutation_type = polynomial; + // we choose the polynomial cross-over type + SFERES_CONST cross_over_t cross_over_type = sbx; + // the mutation rate of the real-valued vector + SFERES_CONST float mutation_rate = 0.1f; + // the cross rate of the real-valued vector + SFERES_CONST float cross_rate = 0.5f; + // a parameter of the polynomial mutation + SFERES_CONST float eta_m = 15.0f; + // a parameter of the polynomial cross-over + SFERES_CONST float eta_c = 10.0f; + }; + + struct pop + { + //number of initial random points + static const size_t init_size = 10; // 1000 + // size of the population + SFERES_CONST unsigned size = 10; //200; + // number of generations + SFERES_CONST unsigned nb_gen = 10; //10,000; + // how often should the result file be written 
(here, each 5 + // generation) + static int dump_period;// 5; + // how many individuals should be created during the random + // generation process? + SFERES_CONST int initial_aleat = 1; + // used by RankSimple to select the pressure + SFERES_CONST float coeff = 1.1f; + // the number of individuals that are kept from on generation to + // another (elitism) + SFERES_CONST float keep_rate = 0.6f; + }; + + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -10.0f; + // minimum value + SFERES_CONST float max = 10.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, nn::cppn::sine, nn::cppn::sigmoid, nn::cppn::gaussian, nn::cppn::linear); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; + + // Specific settings for MNIST database of grayscale + struct image : ParamsCaffe::image + { + static const std::string model_definition; + static const std::string pretrained_model; + + SFERES_CONST bool record_lineage = false; + SFERES_CONST int size = 28; + SFERES_CONST bool use_crops = false; + SFERES_CONST int num_categories = 10; // ILSVR2012 ImageNet has 1000 categories + }; + +}; + +// Initialize the parameter files for Caffe network. +#ifdef LOCAL_RUN + +const std::string Params::image::model_definition = "/home/anh/src/model/imagenet_deploy_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/home/anh/src/model/caffe_reference_imagenet_model"; + +#else + +const std::string Params::image::model_definition = "/project/EvolvingAI/anguyen8/model/imagenet_deploy_image_memory_data.prototxt"; +const std::string Params::image::pretrained_model = "/project/EvolvingAI/anguyen8/model/caffe_reference_imagenet_model"; + +#endif + +int Params::pop::dump_period = 1; + + +int main(int argc, char **argv) +{ + // Disable GLOG output from experiment and also Caffe + // Comment out for debugging + google::InitGoogleLogging(""); + google::SetStderrLogging(3); + + // Our fitness is the class FitTest (see above), that we will call + // fit_t. Params is the set of parameters (struct Params) defined in + // this file. + typedef sferes::fit::FitMapDeepLearning fit_t; + // We define the genotype. Here we choose EvoFloat (real + // numbers). We evolve 10 real numbers, with the params defined in + // Params (cf the beginning of this file) + //typedef gen::EvoFloat<10, Params> gen_t; + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef gen::HyperNn cppn_t; + // This genotype should be simply transformed into a vector of + // parameters (phen::Parameters). The genotype could also have been + // transformed into a shape, a neural network... The phenotype need + // to know which fitness to use; we pass fit_t. + typedef phen::ColorImage phen_t; + // The evaluator is in charge of distributing the evaluation of the + // population. It can be simple eval::Eval (nothing special), + // parallel (for multicore machines, eval::Parallel) or distributed + // (for clusters, eval::Mpi). 
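Whatever the choice, the evaluator only has to expose an eval(pop, begin, end) member; MPI, TBB, or batching are implementation details behind that interface. A minimal serial version might look like the sketch below, assuming the phenotype provides develop() and a fitness object with an eval(phenotype) method; this is an illustrative stand-in, not the sferes eval::Eval source.

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Minimal serial evaluator: develop and evaluate pop[begin, end) one by one.
    // Phen is assumed to expose develop() and fit().eval(*this).
    struct SerialEval {
        template <typename Phen>
        void eval(std::vector<std::shared_ptr<Phen>>& pop,
                  std::size_t begin, std::size_t end) {
            for (std::size_t i = begin; i < end; ++i) {
                pop[i]->develop();            // genotype -> image
                pop[i]->fit().eval(*pop[i]);  // run the classifier, record confidence
            }
        }
    };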
+// typedef eval::Eval eval_t; + typedef eval::MpiParallel eval_t; // TBB + + // Statistics gather data about the evolutionary process (mean + // fitness, Pareto front, ...). Since they can also stores the best + // individuals, they are the container of our results. We can add as + // many statistics as required thanks to the boost::fusion::vector. +// typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef boost::fusion::vector, stat::BestFitMapImage > stat_t; + // Modifiers are functors that are run once all individuals have + // been evalutated. Their typical use is to add some evolutionary + // pressures towards diversity (e.g. fitness sharing). Here we don't + // use this feature. As a consequence we use a "dummy" modifier that + // does nothing. + typedef modif::Dummy<> modifier_t; + // We can finally put everything together. RankSimple is the + // evolutianary algorithm. It is parametrized by the phenotype, the + // evaluator, the statistics list, the modifier and the general params. +// typedef ea::RankSimple ea_t; + typedef ea::MapElite ea_t; + // We now have a special class for our experiment: ea_t. The next + // line instantiate an object of this class + ea_t ea; + // we can now process the command line options an run the + // evolutionary algorithm (if a --load argument is passed, the file + // is loaded; otherwise, the algorithm is launched). + + if (argc > 1) // if a number is provided on the command line + { + int randomSeed = atoi(argv[1]); + printf("randomSeed:%i\n", randomSeed); + srand(randomSeed); //set it as the random seed + + boost::program_options::options_description add_opts = + boost::program_options::options_description(); + + shared_ptr opt (new boost::program_options::option_description( + "continue,t", boost::program_options::value(), + "continue from the loaded file starting from the generation provided" + )); + + add_opts.add(opt); + + options::run_ea(argc, argv, ea, add_opts, false); + } + else + { + run_ea(argc, argv, ea); + } + + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/exp/images/ea/ea_custom.hpp b/modules/dnns_easily_fooled/sferes/exp/images/ea/ea_custom.hpp new file mode 100644 index 000000000..485f25782 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/ea/ea_custom.hpp @@ -0,0 +1,127 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef EA_CUSTOM_HPP_ +#define EA_CUSTOM_HPP_ + +#include +#include +#include +#include +#include + +namespace sferes { + namespace ea { + + SFERES_EA(EaCustom, Ea) { + protected: + std::string _gen_file_path; + + public: + EaCustom () : _gen_file_path("") + { + this->_make_res_dir(); + } + + void _make_res_dir() + { + if (Params::pop::dump_period == -1) + { + return; + } + + // Delete the unused folder by Ea + std::string to_delete = misc::hostname() + "_" + misc::date() + "_" + misc::getpid(); + + if (boost::filesystem::is_directory(to_delete) && boost::filesystem::is_empty(to_delete)) + { + boost::filesystem::remove(to_delete); + } + + // Check if such a folder already exists + this->_res_dir = "mmm"; // Only one folder regardless which platform the program is running on + + boost::filesystem::path my_path(this->_res_dir); + + // Create a new folder if it doesn't exist + if (!boost::filesystem::exists(boost::filesystem::status(my_path))) + { + // Create a new folder if it does not exist + boost::filesystem::create_directory(my_path); + } + // Run experiment from that folder + else + { + std::vector gens; + + // The file to find + int max = 0; + + // Find a gen file + for(boost::filesystem::directory_entry& entry : boost::make_iterator_range(boost::filesystem::directory_iterator(my_path), {})) + { + // Find out if '/gen_' exists in the filename + std::string e = entry.path().string(); + + std::string prefix = this->_res_dir + "/gen_"; + size_t found = e.find(prefix); + + if (found != std::string::npos) + { + // Extract out the generation number + std::string number = std::string(e).replace(found, prefix.length(), ""); + + // Remove double quotes +// number = boost::replace_all_copy(number, "\"", "");.string() + + int gen = boost::lexical_cast(number); + if (gen > max) + { + max = gen; + _gen_file_path = e; + } + + } // end if + } // end for-loop + + // Start run from that gen file +// _continue_run = boost::filesystem::current_path().string() + "/" + _continue_run; + std::cout << "[A]: " << _gen_file_path << "\n"; + } + } + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/ea/rank_simple.hpp b/modules/dnns_easily_fooled/sferes/exp/images/ea/rank_simple.hpp new file mode 100644 index 000000000..8f8b48ec9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/ea/rank_simple.hpp @@ -0,0 +1,167 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
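The EaCustom::_make_res_dir routine above resumes pre-empted runs by reusing a single result directory, scanning it for gen_<N> dump files, and remembering the highest-numbered one. The same scan can be written compactly; the sketch below uses std::filesystem (C++17) instead of boost::filesystem purely to stay self-contained, and the naming convention is the only assumption carried over from the code above.

    #include <filesystem>
    #include <string>

    // Return the path of the gen_<N> file with the largest N in `dir`,
    // or an empty string if no dump is found (same idea as EaCustom).
    std::string latest_gen_file(const std::string& dir) {
        namespace fs = std::filesystem;
        int max_gen = -1;
        std::string best;
        const std::string prefix = "gen_";
        for (const auto& entry : fs::directory_iterator(dir)) {
            const std::string name = entry.path().filename().string();
            if (name.compare(0, prefix.size(), prefix) != 0)
                continue;
            try {
                const int gen = std::stoi(name.substr(prefix.size()));
                if (gen > max_gen) {
                    max_gen = gen;
                    best = entry.path().string();
                }
            } catch (...) {
                // not a number after "gen_": skip this entry
            }
        }
        return best;
    }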
+ + + + +#ifndef RANK_SIMPLE_HPP_ +#define RANK_SIMPLE_HPP_ + +#include +#include +#include +#include "ea_custom.hpp" +#include + +#include + +namespace sferes { + namespace ea { + SFERES_EA(RankSimple, EaCustom) { + public: + + typedef boost::shared_ptr indiv_t; + typedef std::vector raw_pop_t; + typedef typename std::vector pop_t; + typedef RankSimple this_t; + + SFERES_CONST unsigned nb_keep = (unsigned)(Params::pop::keep_rate * Params::pop::size); + + void random_pop() + { + sferes::cont::Continuator continuator; + + // Continuing a run manually from command line or continuing a run automatically if the job was pre-empted + bool continue_run = continuator.enabled() || this->_gen_file_path != ""; + if(continue_run) + { + // Load the population file + raw_pop_t raw_pop; + + if (this->_gen_file_path == "") + { + raw_pop = continuator.getPopulationFromFile(*this); + } + else + { + raw_pop = continuator.getPopulationFromFile(*this, this->_gen_file_path); + } + + // Get the number of population to continue with + const size_t init_size = raw_pop.size(); + + // Resize the current population archive + this->_pop.resize(init_size); + + // Add loaded individuals to the new population + int i = 0; + BOOST_FOREACH(boost::shared_ptr&indiv, this->_pop) + { + indiv = boost::shared_ptr(new Phen(*raw_pop[i])); + ++i; + } + } + else + { + // Original Map-Elites code + // Intialize a random population + this->_pop.resize(Params::pop::size * Params::pop::initial_aleat); + BOOST_FOREACH(boost::shared_ptr& indiv, this->_pop) + { + indiv = boost::shared_ptr(new Phen()); + indiv->random(); + } + } + + // Evaluate the initialized population + this->_eval.eval(this->_pop, 0, this->_pop.size()); + this->apply_modifier(); + std::partial_sort(this->_pop.begin(), this->_pop.begin() + Params::pop::size, + this->_pop.end(), fit::compare()); + this->_pop.resize(Params::pop::size); + + // Continue a run from a specific generation + if(continue_run) + { + if (this->_gen_file_path == "") + { + continuator.run_with_current_population(*this); + } + else + { + continuator.run_with_current_population(*this, this->_gen_file_path); + } + } + } + + //ADDED + void setGen(size_t gen) + { + this->_gen = gen; + } + //ADDED END + + void epoch() + { + assert(this->_pop.size()); + for (unsigned i = nb_keep; i < this->_pop.size(); i += 2) { + unsigned r1 = _random_rank(); + unsigned r2 = _random_rank(); + boost::shared_ptr i1, i2; + this->_pop[r1]->cross(this->_pop[r2], i1, i2); + i1->mutate(); + i2->mutate(); + this->_pop[i] = i1; + this->_pop[i + 1] = i2; + } +#ifndef EA_EVAL_ALL + this->_eval.eval(this->_pop, nb_keep, Params::pop::size); +#else + this->_eval.eval(this->_pop, 0, Params::pop::size); +#endif + this->apply_modifier(); + std::partial_sort(this->_pop.begin(), this->_pop.begin() + nb_keep, + this->_pop.end(), fit::compare()); + dbg::out(dbg::info, "ea")<<"best fitness: " << this->_pop[0]->fit().value() << std::endl; + } + protected: + unsigned _random_rank() { + static float kappa = pow(Params::pop::coeff, nb_keep + 1.0f) - 1.0f; + static float facteur = nb_keep / ::log(kappa + 1); + return (unsigned) (this->_pop.size() - facteur * log(misc::rand(1) * kappa + 1)); + } + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_parallel.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_parallel.hpp new file mode 100644 index 000000000..91299af9f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_parallel.hpp @@ -0,0 +1,238 @@ +//| This file is a 
part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef BATCH_EVAL_MPI_PARALLEL_HPP_ +#define BATCH_EVAL_MPI_PARALLEL_HPP_ + +#include +#include +#include "tbb_parallel_eval.hpp" +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" + +//#ifndef BOOST_MPI_HAS_NOARG_INITIALIZATION +//#error MPI need arguments (we require a full MPI2 implementation) +//#endif + +#define MPI_INFO dbg::out(dbg::info, "mpi")<<"["<<_world->rank()<<"] " +namespace sferes { + + namespace eval { + SFERES_CLASS(BatchMpiTBBParallel) { + public: + BatchMpiTBBParallel() + { + static char* argv[] = {(char*)"sferes2", 0x0}; + char** argv2 = (char**) malloc(sizeof(char*) * 2); + int argc = 1; + argv2[0] = argv[0]; + argv2[1] = argv[1]; + using namespace boost; + dbg::out(dbg::info, "mpi")<<"Initializing MPI..."<(new mpi::environment(argc, argv2, true)); + dbg::out(dbg::info, "mpi")<<"MPI initialized"<(new mpi::communicator()); + MPI_INFO << "communicator initialized"<rank() > 0) + { + Params::pop::dump_period = -1; + } + } + + template + void eval(std::vector >& pop, + size_t begin, size_t end) { + dbg::trace("mpi", DBG_HERE); + + // Develop phenotypes in parallel + // Each MPI process develops one phenotype + if (_world->rank() == 0) + _master_develop(pop, begin, end); + else + _slave_develop(); + + // Make sure the processes have finished developing phenotypes + + // Evaluate phenotypes in parallel but in batches of 256. + // Caffe GPU supports max of 512. + // There is no limit for CPU but we try to find out what batch size works best. 
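The batching comment above is the key constraint: phenotypes are developed in parallel, but classification happens in fixed-size chunks because the Caffe memory layer accepts a bounded number of images per forward pass (256 is used here, below the GPU maximum of 512). The chunking itself is just a range split; a minimal standalone sketch, with the batch size and the process_batch callback as assumptions:

    #include <algorithm>
    #include <cstddef>
    #include <functional>

    // Walk [begin, end) in chunks of at most batch_size and hand each chunk
    // to process_batch(first, last) -- e.g. one classifier forward pass per chunk.
    void for_each_batch(std::size_t begin, std::size_t end, std::size_t batch_size,
                        const std::function<void(std::size_t, std::size_t)>& process_batch) {
        for (std::size_t first = begin; first < end; first += batch_size) {
            const std::size_t last = std::min(first + batch_size, end);
            process_batch(first, last);
        }
    }

In the code below this split is expressed with a strided parallel::range_t(begin, end, Params::image::batch) handed to TBB.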
+ if (_world->rank() == 0) + { + _master_eval(pop, begin, end); + } + } + + ~BatchMpiTBBParallel() + { + MPI_INFO << "Finalizing MPI..."<rank() == 0) + for (size_t i = 1; i < _world->size(); ++i) + _world->send(i, _env->max_tag(), s); + _finalize(); + } + + protected: + void _finalize() + { + _world = boost::shared_ptr(); + dbg::out(dbg::info, "mpi")<<"MPI world destroyed"<(); + dbg::out(dbg::info, "mpi")<<"environment destroyed"< + void _master_develop(std::vector >& pop, size_t begin, size_t end) + { + dbg::trace("mpi", DBG_HERE); + size_t current = begin; + std::vector developed(pop.size()); + std::fill(developed.begin(), developed.end(), false); + // first round + for (size_t i = 1; i < _world->size() && current < end; ++i) { + MPI_INFO << "[master] [send-init...] ->" <send(i, current, pop[current]->gen()); + MPI_INFO << "[master] [send-init ok] ->" <" <send(s.source(), current, pop[current]->gen()); + MPI_INFO << "[master] [send ok] ->" < + boost::mpi::status _recv(std::vector& developed, + std::vector >& pop) + { + dbg::trace("mpi", DBG_HERE); + using namespace boost::mpi; + status s = _world->probe(); + MPI_INFO << "[rcv...]" << getpid() << " tag=" << s.tag() << std::endl; + //_world->recv(s.source(), s.tag(), pop[s.tag()]->fit()); + + // Receive the whole developed phenotype from slave processes + Phen p; + _world->recv(s.source(), s.tag(), p); + + // Assign the developed phenotype back to the current population for further evaluation + pop[s.tag()]->image() = p.image(); + + MPI_INFO << "[rcv ok]" << " tag=" << s.tag() << std::endl; + developed[s.tag()] = true; + return s; + } + + template + void _slave_develop() + { + dbg::trace("mpi", DBG_HERE); + while(true) { + Phen p; + boost::mpi::status s = _world->probe(); + if (s.tag() == _env->max_tag()) { + MPI_INFO << "[slave] Quit requested" << std::endl; + MPI_Finalize(); + exit(0); + } else { + MPI_INFO <<"[slave] [rcv...] [" << getpid()<< "]" << std::endl; + _world->recv(0, s.tag(), p.gen()); + MPI_INFO <<"[slave] [rcv ok] " << " tag="<(pop, Params::image::model_definition, Params::image::pretrained_model)); + + // The barrier is implicitly set here after the for-loop in TBB. + } + + boost::shared_ptr _env; + boost::shared_ptr _world; + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_tbb_parallel.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_tbb_parallel.hpp new file mode 100644 index 000000000..2e4255d4c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/batch_mpi_tbb_parallel.hpp @@ -0,0 +1,238 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef BATCH_EVAL_MPI_TBB_PARALLEL_HPP_ +#define BATCH_EVAL_MPI_TBB_PARALLEL_HPP_ + +#include +#include +#include "tbb_parallel_eval.hpp" +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" + +//#ifndef BOOST_MPI_HAS_NOARG_INITIALIZATION +//#error MPI need arguments (we require a full MPI2 implementation) +//#endif + +#define MPI_INFO dbg::out(dbg::info, "mpi")<<"["<<_world->rank()<<"] " +namespace sferes { + + namespace eval { + SFERES_CLASS(BatchMpiParallel) { + public: + BatchMpiParallel() + { + static char* argv[] = {(char*)"sferes2", 0x0}; + char** argv2 = (char**) malloc(sizeof(char*) * 2); + int argc = 1; + argv2[0] = argv[0]; + argv2[1] = argv[1]; + using namespace boost; + dbg::out(dbg::info, "mpi")<<"Initializing MPI..."<(new mpi::environment(argc, argv2, true)); + dbg::out(dbg::info, "mpi")<<"MPI initialized"<(new mpi::communicator()); + MPI_INFO << "communicator initialized"<rank() > 0) + { + Params::pop::dump_period = -1; + } + } + + template + void eval(std::vector >& pop, + size_t begin, size_t end) { + dbg::trace("mpi", DBG_HERE); + + // Develop phenotypes in parallel + // Each MPI process develops one phenotype + if (_world->rank() == 0) + _master_develop(pop, begin, end); + else + _slave_develop(); + + // Make sure the processes have finished developing phenotypes + + // Evaluate phenotypes in parallel but in batches of 256. + // Caffe GPU supports max of 512. + // There is no limit for CPU but we try to find out what batch size works best. + if (_world->rank() == 0) + { + _master_eval(pop, begin, end); + } + } + + ~BatchMpiParallel() + { + MPI_INFO << "Finalizing MPI..."<rank() == 0) + for (size_t i = 1; i < _world->size(); ++i) + _world->send(i, _env->max_tag(), s); + _finalize(); + } + + protected: + void _finalize() + { + _world = boost::shared_ptr(); + dbg::out(dbg::info, "mpi")<<"MPI world destroyed"<(); + dbg::out(dbg::info, "mpi")<<"environment destroyed"< + void _master_develop(std::vector >& pop, size_t begin, size_t end) + { + dbg::trace("mpi", DBG_HERE); + size_t current = begin; + std::vector developed(pop.size()); + std::fill(developed.begin(), developed.end(), false); + // first round + for (size_t i = 1; i < _world->size() && current < end; ++i) { + MPI_INFO << "[master] [send-init...] 
->" <send(i, current, pop[current]->gen()); + MPI_INFO << "[master] [send-init ok] ->" <" <send(s.source(), current, pop[current]->gen()); + MPI_INFO << "[master] [send ok] ->" < + boost::mpi::status _recv(std::vector& developed, + std::vector >& pop) + { + dbg::trace("mpi", DBG_HERE); + using namespace boost::mpi; + status s = _world->probe(); + MPI_INFO << "[rcv...]" << getpid() << " tag=" << s.tag() << std::endl; + //_world->recv(s.source(), s.tag(), pop[s.tag()]->fit()); + + // Receive the whole developed phenotype from slave processes + Phen p; + _world->recv(s.source(), s.tag(), p); + + // Assign the developed phenotype back to the current population for further evaluation + pop[s.tag()]->image() = p.image(); + + MPI_INFO << "[rcv ok]" << " tag=" << s.tag() << std::endl; + developed[s.tag()] = true; + return s; + } + + template + void _slave_develop() + { + dbg::trace("mpi", DBG_HERE); + while(true) { + Phen p; + boost::mpi::status s = _world->probe(); + if (s.tag() == _env->max_tag()) { + MPI_INFO << "[slave] Quit requested" << std::endl; + MPI_Finalize(); + exit(0); + } else { + MPI_INFO <<"[slave] [rcv...] [" << getpid()<< "]" << std::endl; + _world->recv(0, s.tag(), p.gen()); + MPI_INFO <<"[slave] [rcv ok] " << " tag="<(pop, Params::image::model_definition, Params::image::pretrained_model)); + + // The barrier is implicitly set here after the for-loop in TBB. + } + + boost::shared_ptr _env; + boost::shared_ptr _world; + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/cuda_parallel.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/cuda_parallel.hpp new file mode 100644 index 000000000..09ac530da --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/cuda_parallel.hpp @@ -0,0 +1,272 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef EVAL_CUDA_PARALLEL_HPP_ +#define EVAL_CUDA_PARALLEL_HPP_ + +#include +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" + +#include + +namespace sferes { + + namespace caffe + { + /** + * Using a shared_ptr to hold a pointer to a statically allocated object. + * http://www.boost.org/doc/libs/1_55_0/libs/smart_ptr/sp_techniques.html#static + */ + struct null_deleter + { + void operator()(void const *) const + { + } + }; + + class CaffeFactory + { + private: + static bool initialized; + static Net* _net_1; + static Net* _net_2; + static int _status; + + public: + static shared_ptr > getCaffe(const std::string model_definition, const std::string pretrained_model) + { + if (!initialized) + { + // Initialize Caffe net 1 + _net_1 = new Net(model_definition); + + // Get the trained model + _net_1->CopyTrainedLayersFrom(pretrained_model); + + // Initialize Caffe net 2 + _net_2 = new Net(model_definition); + + // Get the trained model + _net_2->CopyTrainedLayersFrom(pretrained_model); + + initialized = true; + } + + if (_status == 1) + { + _status = 2; + shared_ptr > c(_net_1, null_deleter()); + return c; + } + else + { + _status = 1; + shared_ptr > c(_net_2, null_deleter()); + return c; + } + } + + CaffeFactory() + { + initialized = false; + _status = 1; + } + + }; + } + + namespace eval { + /** + * Develop phenotypes in parallel using TBB. + */ + template + struct _parallel_develop { + typedef std::vector > pop_t; + pop_t _pop; + + ~_parallel_develop() { } + _parallel_develop(pop_t& pop) : _pop(pop) {} + _parallel_develop(const _parallel_develop& ev) : _pop(ev._pop) {} + + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + assert(i < _pop.size()); + _pop[i]->develop(); + } + } + }; + + + SFERES_CLASS(CudaParallel) + { + private: + + /** + * Develop phenotypes in parallel using TBB. 
+ */ + template + struct _parallel_cuda_eval { + typedef std::vector > pop_t; + pop_t _pop; + + ~_parallel_cuda_eval() { } + _parallel_cuda_eval(pop_t& pop) : _pop(pop) {} + _parallel_cuda_eval(const _parallel_cuda_eval& ev) : _pop(ev._pop) {} + + void operator() (const parallel::range_t& r) const + { + size_t begin = r.begin(); + size_t end = r.end(); + + LOG(INFO) << "Begin: " << begin << " --> " << end << "\n"; + + dbg::trace trace("eval_cuda", DBG_HERE); + assert(_pop.size()); + assert(begin < _pop.size()); + assert(end <= _pop.size()); + + // Algorithm works as follow: + // Send the individuals to Caffe first + // Get back a list of results + // Assign the results to individuals + + // Construct a list of images to be in the batch + std::vector images(0); + + for (size_t i = begin; i < end; ++i) + { + cv::Mat output; + _pop[i]->imageBGR(output); + + images.push_back( output ); // Add to a list of images + } + + // Initialize Caffe net + shared_ptr > caffe_test_net = sferes::caffe::CaffeFactory::getCaffe( + Params::image::model_definition, + Params::image::pretrained_model + ); + +// shared_ptr > caffe_test_net = +// boost::shared_ptr >(new Net(Params::image::model_definition)); +// +// // Get the trained model +// caffe_test_net->CopyTrainedLayersFrom(Params::image::pretrained_model); + + // Run ForwardPrefilled + float loss; // const vector*>& result = caffe_test_net.ForwardPrefilled(&loss); + + // Number of eval iterations + const size_t num_images = end - begin; + + // Add images and labels manually to the ImageDataLayer + // vector images(num_images, image); + vector labels(num_images, 0); + const shared_ptr > image_data_layer = + boost::static_pointer_cast >( + caffe_test_net->layer_by_name("data")); + + image_data_layer->AddImagesAndLabels(images, labels); + + // Classify this batch of 512 images + const vector*>& result = caffe_test_net->ForwardPrefilled(&loss); + + // Get the highest layer of Softmax + const float* argmaxs = result[1]->cpu_data(); + + // Get back a list of results + LOG(INFO) << "Number of results: " << result[1]->num() << "\n"; + + // Assign the results to individuals + for(int i = 0; i < num_images * 2; i += 2) + { + LOG(INFO)<< " Image: "<< i/2 + 1 << " class:" << argmaxs[i] << " : " << argmaxs[i+1] << "\n"; + + int pop_index = begin + i/2; // Index of individual in the batch + + // Set the fitness of this individual + _pop[pop_index]->fit().setFitness((float) argmaxs[i+1]); + + // For Map-Elite, set the cell description + _pop[pop_index]->fit().set_desc(0, argmaxs[i]); + } + } + }; + + public: + template + void eval(std::vector >& pop, size_t begin, size_t end) + { + dbg::trace trace("eval", DBG_HERE); + + assert(pop.size()); + assert(begin < pop.size()); + assert(end <= pop.size()); + + // Develop phenotypes in parallel using TBB. + // The barrier is implicitly set here after the for-loop in TBB. 
+ //parallel::init(); + // We have only 2 GPUs per node + //tbb::task_scheduler_init init1(4); + parallel::p_for(parallel::range_t(begin, end), + _parallel_develop(pop)); + + // Number of eval iterations + const size_t count = end - begin; + + LOG(INFO) << "Size: " << count << " vs " << Params::image::batch << "\n"; + + // Load balancing + // We have only 2 GPUs per node + //tbb::task_scheduler_init init2(2); + + parallel::p_for( + parallel::range_t(begin, end, Params::image::batch), + _parallel_cuda_eval(pop)); + } + + }; + + } +} + +bool sferes::caffe::CaffeFactory::initialized; +int sferes::caffe::CaffeFactory::_status; +Net* sferes::caffe::CaffeFactory::_net_1; +Net* sferes::caffe::CaffeFactory::_net_2; + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/mpi_parallel.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/mpi_parallel.hpp new file mode 100644 index 000000000..cbcff17e4 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/mpi_parallel.hpp @@ -0,0 +1,196 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
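For reference, the Softmax result consumed in the _parallel_cuda_eval functor above is read as an interleaved float array: element 2k is the arg-max class for image k and element 2k+1 its confidence; the class becomes the MAP-Elites cell descriptor and the confidence becomes the fitness. A minimal standalone sketch of that unpacking step, with Individual as a hypothetical stand-in for the phenotype/fitness pair:

    #include <cstddef>
    #include <vector>

    struct Individual {
        int predicted_class = -1;  // used as the MAP-Elites cell descriptor
        float confidence = 0.0f;   // used as the fitness value
    };

    // outputs has 2 * individuals.size() entries: [class0, conf0, class1, conf1, ...]
    void assign_results(const std::vector<float>& outputs,
                        std::vector<Individual>& individuals) {
        for (std::size_t k = 0; k < individuals.size(); ++k) {
            individuals[k].predicted_class = static_cast<int>(outputs[2 * k]);
            individuals[k].confidence = outputs[2 * k + 1];
        }
    }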
+ + + + +#ifndef EVAL_MPI_PARALLEL_HPP_ +#define EVAL_MPI_PARALLEL_HPP_ + +#include +#include + +//#ifndef BOOST_MPI_HAS_NOARG_INITIALIZATION +//#error MPI need arguments (we require a full MPI2 implementation) +//#endif + +#define MPI_INFO dbg::out(dbg::info, "mpi")<<"["<<_world->rank()<<"] " +namespace sferes { + + namespace eval { + SFERES_CLASS(MpiParallel) { + public: + MpiParallel() { + static char* argv[] = {(char*)"sferes2", 0x0}; + char** argv2 = (char**) malloc(sizeof(char*) * 2); + int argc = 1; + argv2[0] = argv[0]; + argv2[1] = argv[1]; + using namespace boost; + dbg::out(dbg::info, "mpi")<<"Initializing MPI..."<(new mpi::environment(argc, argv2, true)); + dbg::out(dbg::info, "mpi")<<"MPI initialized"<(new mpi::communicator()); + MPI_INFO << "communicator initialized"<rank() > 0) + { + Params::pop::dump_period = -1; + } + + } + + template + void eval(std::vector >& pop, + size_t begin, size_t end) { + dbg::trace("mpi", DBG_HERE); + if (_world->rank() == 0) + _master_loop(pop, begin, end); + else + _slave_loop(); + } + + ~MpiParallel() + { + MPI_INFO << "Finalizing MPI..."<rank() == 0) + for (size_t i = 1; i < _world->size(); ++i) + _world->send(i, _env->max_tag(), s); + _finalize(); + } + + protected: + void _finalize() + { + _world = boost::shared_ptr(); + dbg::out(dbg::info, "mpi")<<"MPI world destroyed"<(); + dbg::out(dbg::info, "mpi")<<"environment destroyed"< + void _master_loop(std::vector >& pop, size_t begin, size_t end) + { + dbg::trace("mpi", DBG_HERE); + size_t current = begin; + std::vector evaluated(pop.size()); + std::fill(evaluated.begin(), evaluated.end(), false); + // first round + for (size_t i = 1; i < _world->size() && current < end; ++i) { + MPI_INFO << "[master] [send-init...] ->" <send(i, current, pop[current]->gen()); + MPI_INFO << "[master] [send-init ok] ->" <" <send(s.source(), current, pop[current]->gen()); + MPI_INFO << "[master] [send ok] ->" < + boost::mpi::status _recv(std::vector& evaluated, + std::vector >& pop) + { + dbg::trace("mpi", DBG_HERE); + using namespace boost::mpi; + status s = _world->probe(); + MPI_INFO << "[rcv...]" << getpid() << " tag=" << s.tag() << std::endl; + //_world->recv(s.source(), s.tag(), pop[s.tag()]->fit()); + + // Receive the whole developed phenotype from slave processes + Phen p; + _world->recv(s.source(), s.tag(), p); + + // Assign the developed data back to the current population for further evaluation + pop[s.tag()]->fit() = p.fit(); + pop[s.tag()]->image() = p.image(); + + MPI_INFO << "[rcv ok]" << " tag=" << s.tag() << std::endl; + evaluated[s.tag()] = true; + return s; + } + + template + void _slave_loop() + { + dbg::trace("mpi", DBG_HERE); + while(true) { + Phen p; + boost::mpi::status s = _world->probe(); + if (s.tag() == _env->max_tag()) { + MPI_INFO << "[slave] Quit requested" << std::endl; + MPI_Finalize(); + exit(0); + } else { + MPI_INFO <<"[slave] [rcv...] [" << getpid()<< "]" << std::endl; + _world->recv(0, s.tag(), p.gen()); + MPI_INFO <<"[slave] [rcv ok] " << " tag="< +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "tbb_parallel_develop.hpp" +#include "tbb_parallel_eval.hpp" + +#include + +namespace sferes { + + namespace eval { + + SFERES_CLASS(TBBParallel) + { + + public: + template + void eval(std::vector >& pop, size_t begin, size_t end) + { + dbg::trace trace("eval", DBG_HERE); + + assert(pop.size()); + assert(begin < pop.size()); + assert(end <= pop.size()); + + // Develop phenotypes in parallel using TBB. 
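The MpiParallel class above drives a master/slave evaluation: the master ships genotypes to slaves, using the MPI message tag to carry the individual's index in the population, each slave sends back the developed phenotype (fitness plus image), and env->max_tag() doubles as the shutdown signal. A minimal, hypothetical sketch of that tag-as-index protocol with Boost.MPI, with a std::string standing in for the serialized genotype/phenotype (run with something like `mpirun -np 2`):

    #include <boost/mpi.hpp>
    #include <boost/serialization/string.hpp>
    #include <iostream>
    #include <string>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[]) {
        mpi::environment env(argc, argv);
        mpi::communicator world;

        if (world.rank() == 0) {                           // master: tag = index of the individual
            std::string genome = "genotype-0";
            world.send(1, /*tag=*/0, genome);
            std::string phenotype;
            mpi::status s = world.recv(1, /*tag=*/0, phenotype);
            std::cout << "individual " << s.tag() << " -> " << phenotype << std::endl;
            world.send(1, env.max_tag(), std::string());   // quit signal, as in MpiParallel above
        } else {                                           // slave: evaluate whatever arrives
            while (true) {
                mpi::status s = world.probe();
                std::string msg;
                world.recv(0, s.tag(), msg);
                if (s.tag() == env.max_tag()) break;
                world.send(0, s.tag(), msg + " -> evaluated");
            }
        }
        return 0;
    }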
+ // The barrier is implicitly set here after the for-loop in TBB. + parallel::init(); + parallel::p_for(parallel::range_t(begin, end), + sferes::eval::parallel_develop(pop)); + + // Number of eval iterations + const size_t count = end - begin; + + LOG(INFO) << "Size: " << count << " vs " << Params::image::batch << "\n"; + + // Evaluate phenotypes in parallel using TBB. + parallel::p_for( + parallel::range_t(begin, end, Params::image::batch), + sferes::eval::parallel_tbb_eval(pop, Params::image::model_definition, Params::image::pretrained_model)); + } + + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_develop.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_develop.hpp new file mode 100644 index 000000000..b50db7992 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_develop.hpp @@ -0,0 +1,66 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef TBB_PARALLEL_DEVELOP_HPP_ +#define TBB_PARALLEL_DEVELOP_HPP_ + +#include + +namespace sferes { + + namespace eval { + /** + * Develop phenotypes in parallel using TBB. 
+ */ + template + struct parallel_develop { + typedef std::vector > pop_t; + pop_t _pop; + + ~parallel_develop() { } + parallel_develop(pop_t& pop) : _pop(pop) {} + parallel_develop(const parallel_develop& ev) : _pop(ev._pop) {} + + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + assert(i < _pop.size()); + _pop[i]->develop(); + } + } + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_eval.hpp b/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_eval.hpp new file mode 100644 index 000000000..45a8af2ca --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/eval/tbb_parallel_eval.hpp @@ -0,0 +1,154 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef EVAL_TBB_PARALLEL_EVAL_HPP_ +#define EVAL_TBB_PARALLEL_EVAL_HPP_ + +#include +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "tbb_parallel_develop.hpp" + +#include + +namespace sferes { + + namespace eval { + + /** + * Develop phenotypes in parallel using TBB. 
+ */ + template + struct parallel_tbb_eval { + typedef std::vector > pop_t; + pop_t _pop; + std::string _model_definition; + std::string _pretrained_model; + + ~parallel_tbb_eval() { } + parallel_tbb_eval(pop_t& pop, const std::string model_definition, const std::string pretrained_model) : + _pop(pop), + _model_definition(model_definition), + _pretrained_model(pretrained_model) + { + } + + parallel_tbb_eval(const parallel_tbb_eval& ev) : + _pop(ev._pop), + _model_definition(_model_definition), + _pretrained_model(_pretrained_model) + { + } + + void operator() (const parallel::range_t& r) const + { + size_t begin = r.begin(); + size_t end = r.end(); + + LOG(INFO) << "Begin: " << begin << " --> " << end << "\n"; + + dbg::trace trace("eval_cuda", DBG_HERE); + assert(_pop.size()); + assert(begin < _pop.size()); + assert(end <= _pop.size()); + + // Algorithm works as follow: + // Send the individuals to Caffe first + // Get back a list of results + // Assign the results to individuals + + // Construct a list of images to be in the batch + std::vector images(0); + + for (size_t i = begin; i < end; ++i) + { + cv::Mat output; + _pop[i]->imageBGR(output); + + images.push_back( output ); // Add to a list of images + } + + // Initialize Caffe net + shared_ptr > caffe_test_net = + boost::shared_ptr >(new Net(_model_definition)); + + // Get the trained model + caffe_test_net->CopyTrainedLayersFrom(_pretrained_model); + + // Run ForwardPrefilled + float loss; // const vector*>& result = caffe_test_net.ForwardPrefilled(&loss); + + // Number of eval iterations + const size_t num_images = end - begin; + + // Add images and labels manually to the ImageDataLayer + // vector images(num_images, image); + vector labels(num_images, 0); + const shared_ptr > image_data_layer = + boost::static_pointer_cast >( + caffe_test_net->layer_by_name("data")); + + image_data_layer->AddImagesAndLabels(images, labels); + + // Classify this batch of 512 images + const vector*>& result = caffe_test_net->ForwardPrefilled(&loss); + + // Get the highest layer of Softmax + const float* argmaxs = result[1]->cpu_data(); + + // Get back a list of results + LOG(INFO) << "Number of results: " << result[1]->num() << "\n"; + + // Assign the results to individuals + for(int i = 0; i < num_images * 2; i += 2) + { + LOG(INFO)<< " Image: "<< i/2 + 1 << " class:" << argmaxs[i] << " : " << argmaxs[i+1] << "\n"; + + int pop_index = begin + i/2; // Index of individual in the batch + + // Set the fitness of this individual + _pop[pop_index]->fit().setFitness((float) argmaxs[i+1]); + + // For Map-Elite, set the cell description + _pop[pop_index]->fit().set_desc(0, argmaxs[i]); + } + } + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_deep_learning.hpp b/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_deep_learning.hpp new file mode 100644 index 000000000..dac7c3014 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_deep_learning.hpp @@ -0,0 +1,296 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. 
You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef FIT_DEEP_LEARNING_HPP +#define FIT_DEEP_LEARNING_HPP + +#include + +// Caffe ------------------------------------------------- +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +// Caffe ------------------------------------------------- + +using namespace caffe; + +#define FIT_DEEP_LEARNING(Name) SFERES_FITNESS(Name, sferes::fit::Fitness) + +namespace sferes +{ + namespace fit + { + SFERES_FITNESS(FitDeepLearning, sferes::fit::Fitness) + { + protected: + + /** + * Crop an image based on the coordinates and the size of the crop. + */ + static cv::Mat crop(const cv::Mat& image, + const size_t x, const size_t y, const size_t width, const size_t height, const size_t offset, const bool flip = false) + { + // Setup a rectangle to define your region of interest + // int x, int y, int width, int height + cv::Rect myROI(x, y, width, height); // top-left + + // Crop the full image to that image contained by the rectangle myROI + // Note that this doesn't copy the data + cv::Mat croppedImage = image(myROI); + + // Create a background image of size 256x256 + cv::Mat background (Params::image::size, Params::image::size, CV_8UC3, cv::Scalar(255, 255, 255)); + + // Because we are using crop size of 227x227 which is odd, when the image size is even 256x256 + // This adjustment helps aligning the crop. + int left = offset/2; + if (flip) + { + left++; + } + + // Because Caffe requires 256x256 images, we paste the crop back to a dummy background. + croppedImage.copyTo(background(cv::Rect(left, offset/2, width, height))); + + return background; + } + + /** + * Create ten crops (4 corners, 1 center, and x2 for mirrors). + * Following Alex 2012 paper. + * The 10 crops are added back to the list. + */ + static void _createTenCrops(const cv::Mat& image, vector& list) + { + // Offset + const int crop_size = Params::image::crop_size; + const int offset = Params::image::size - crop_size; + + // 1. 
Top-left + { + cv::Mat cropped = crop(image, 0, 0, crop_size, crop_size, offset); + + // Add a crop to list + list.push_back(cropped); + + cv::Mat flipped; + cv::flip(crop(image, 0, 0, crop_size, crop_size, offset, true), flipped, 1); + + // Add a flipped crop to list + list.push_back(flipped); + } + + // 2. Top-Right + { + cv::Mat cropped = crop(image, offset, 0, crop_size, crop_size, offset); + + // Add a crop to list + list.push_back(cropped); + + cv::Mat flipped; + cv::flip(crop(image, offset, 0, crop_size, crop_size, offset, true), flipped, 1); + + // Add a flipped crop to list + list.push_back(flipped); + } + + // 3. Bottom-left + { + cv::Mat cropped = crop(image, 0, offset, crop_size, crop_size, offset); + + // Add a crop to list + list.push_back(cropped); + + cv::Mat flipped; + cv::flip(crop(image, 0, offset, crop_size, crop_size, offset, true), flipped, 1); + + // Add a flipped crop to list + list.push_back(flipped); + } + + // 4. Bottom-right + { + cv::Mat cropped = crop(image, offset, offset, crop_size, crop_size, offset); + + // Add a crop to list + list.push_back(cropped); + + cv::Mat flipped; + cv::flip(crop(image, offset, offset, crop_size, crop_size, offset, true), flipped, 1); + + // Add a flipped crop to list + list.push_back(flipped); + } + + // 5. Center and its mirror + { + cv::Mat cropped = crop(image, offset/2, offset/2, crop_size, crop_size, offset); + + // Add a crop to list + list.push_back(cropped); + + cv::Mat flipped; + cv::flip(crop(image, offset/2, offset/2, crop_size, crop_size, offset, true), flipped, 1); + + // Add a flipped crop to list + list.push_back(flipped); + } + } + + private: + /** + * Evaluate the given image to see its probability in the given category. + */ + float _getProbability(const cv::Mat& image, const int category) + { + this->initCaffeNet(); //Initialize caffe + + // Initialize test network + shared_ptr > caffe_test_net = shared_ptr >( new Net(Params::image::model_definition)); + + // Get the trained model + caffe_test_net->CopyTrainedLayersFrom(Params::image::pretrained_model); + + // Run ForwardPrefilled + float loss; + + // Add images and labels manually to the ImageDataLayer + vector labels(10, 0); + vector images; + + // Add images to the list + if (Params::image::use_crops) + { + // Ten crops have been stored in the vector + _createTenCrops(image, images); + } + else + { + images.push_back(image); + } + + // Classify images + const shared_ptr > image_data_layer = + boost::static_pointer_cast >( + caffe_test_net->layer_by_name("data")); + + image_data_layer->AddImagesAndLabels(images, labels); + + const vector*>& result = caffe_test_net->ForwardPrefilled(&loss); + + // Get the highest layer of Softmax + const float* softmax = result[1]->cpu_data(); + + // If use 10 crops, we have to average the predictions of 10 crops + if (Params::image::use_crops) + { + vector values; + + // Average the predictions of evaluating 10 crops + for(int i = 0; i < Params::image::num_categories; ++i) + { + boost::accumulators::accumulator_set > avg; + + for(int j = 0; j < 10 * Params::image::num_categories; j += Params::image::num_categories) + { + avg(softmax[i + j]); + } + + double mean = boost::accumulators::mean(avg); + + values.push_back(mean); + } + + return values[category]; + } + // If use only 1 crop + else + { + return softmax[category]; + } + + } + + public: + + // Indiv will have the type defined in the main (phen_t) + template + void eval(const Indiv& ind) + { + // Convert image to BGR before evaluating + cv::Mat output; + + // Convert HLS 
into BGR because imwrite uses BGR color space + cv::cvtColor(ind.image(), output, CV_HLS2BGR); + + // Evolve images to be categorized as a soccer ball + this->_value = _getProbability(output, Params::image::category_id); + } + + // Indiv will have the type defined in the main (phen_t) + void setFitness(float value) + { + this->_value = value; + } + + void initCaffeNet() + { + // Set test phase + Caffe::set_phase(Caffe::TEST); + + if (Params::image::use_gpu) + { + // Set GPU mode + Caffe::set_mode(Caffe::GPU); + } + else + { + // Set CPU mode + Caffe::set_mode(Caffe::CPU); + } + } + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_map_deep_learning.hpp b/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_map_deep_learning.hpp new file mode 100644 index 000000000..0919e6ca0 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/fit/fit_map_deep_learning.hpp @@ -0,0 +1,238 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
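The ten-crop evaluation in FitDeepLearning above follows the AlexNet-style scheme: four corner crops plus the center crop and their mirrors are pushed through the network and the per-category softmax outputs are averaged. A small self-contained sketch of that averaging, assuming the softmax blob is laid out crop-major exactly as the accumulator loop above indexes it:

    #include <cstdio>
    #include <vector>

    int main() {
        const int num_crops = 10, num_categories = 4;        // 4 instead of 1000 to keep it readable
        std::vector<float> softmax(num_crops * num_categories, 0.f);
        for (int crop = 0; crop < num_crops; ++crop)
            softmax[crop * num_categories + 2] = 1.f;         // every crop votes for category 2

        std::vector<float> avg(num_categories, 0.f);
        for (int cat = 0; cat < num_categories; ++cat) {
            for (int j = 0; j < num_crops * num_categories; j += num_categories)
                avg[cat] += softmax[cat + j];                 // same indexing as the accumulator loop above
            avg[cat] /= num_crops;
        }
        std::printf("p(category 2) = %.2f\n", avg[2]);        // prints 1.00
        return 0;
    }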
+ +#ifndef FIT_MAP_DEEP_LEARNING_HPP +#define FIT_MAP_DEEP_LEARNING_HPP + +#include "fit_deep_learning.hpp" +#include + +#include +#include + +// Headers specifics to the computations we need +#include +#include + + +#define FIT_MAP_DEEP_LEARNING(Name) SFERES_FITNESS(Name, sferes::fit::FitDeepLearning) + +namespace sferes +{ + namespace fit + { + SFERES_FITNESS(FitMapDeepLearning, sferes::fit::FitDeepLearning) + { + /* + private: + struct ArgMax + { + unsigned int category; + float probability; + }; + + ArgMax getMaxProbability(const cv::Mat& image) + { + this->initCaffeNet(); //Initialize caffe + + // Initialize test network + shared_ptr > caffe_test_net = shared_ptr >( new Net(Params::image::model_definition)); + + // Get the trained model + caffe_test_net->CopyTrainedLayersFrom(Params::image::pretrained_model); + + // Run ForwardPrefilled + float loss; // const vector*>& result = caffe_test_net.ForwardPrefilled(&loss); + + // Add images and labels manually to the ImageDataLayer + vector images(1, image); + vector labels(1, 0); + const shared_ptr > image_data_layer = + boost::static_pointer_cast >( + caffe_test_net->layer_by_name("data")); + + image_data_layer->AddImagesAndLabels(images, labels); + + vector* > dummy_bottom_vec; + const vector*>& result = caffe_test_net->Forward(dummy_bottom_vec, &loss); + + // Get the highest layer of Softmax + const float* argmax = result[1]->cpu_data(); + + ArgMax m; + m.category = (int) argmax[0]; // Category + m.probability = (float) argmax[1]; // Probability + + return m; + } + */ + + private: + void _setProbabilityList(const cv::Mat& image) + { + this->initCaffeNet(); //Initialize caffe + + // Initialize test network + shared_ptr > caffe_test_net = shared_ptr >( new Net(Params::image::model_definition)); + + // Get the trained model + caffe_test_net->CopyTrainedLayersFrom(Params::image::pretrained_model); + + // Run ForwardPrefilled + float loss; + + // Add images and labels manually to the ImageDataLayer + vector labels(10, 0); + vector images; + + // Add images to the list + if (Params::image::use_crops) + { + // Ten crops have been stored in the vector + this->_createTenCrops(image, images); + } + else + { + images.push_back(image); + } + + // Classify images + const shared_ptr > image_data_layer = + boost::static_pointer_cast >( + caffe_test_net->layer_by_name("data")); + + image_data_layer->AddImagesAndLabels(images, labels); + + const vector*>& result = caffe_test_net->ForwardPrefilled(&loss); + + // Get the highest layer of Softmax + const float* softmax = result[1]->cpu_data(); + + vector values; + + boost::accumulators::accumulator_set > max; + + // Clear the probability in case it is called twice + _prob.clear(); + + // If use 10 crops, we have to average the predictions of 10 crops + if (Params::image::use_crops) + { + // Average the predictions of evaluating 10 crops + for(int i = 0; i < Params::image::num_categories; ++i) + { + boost::accumulators::accumulator_set > avg; + + for(int j = 0; j < 10 * Params::image::num_categories; j += Params::image::num_categories) + { + avg(softmax[i + j]); + } + + double mean = boost::accumulators::mean(avg); + + // Push 1000 probabilities in the list + _prob.push_back(mean); + + max(mean); // Add this mean to a list for computing the max later + } + } + else + { + for(int i = 0; i < Params::image::num_categories; ++i) + { + float v = softmax[i]; + + // Push 1000 probabilities in the list + _prob.push_back(v); + + max(v); // Add this mean to a list for computing the max later + } + } + + float 
max_prob = boost::accumulators::max(max); + + // Set the fitness + this->_value = max_prob; + } + + public: + FitMapDeepLearning() : _prob(Params::image::num_categories) { } + const std::vector& desc() const { return _prob; } + + // Indiv will have the type defined in the main (phen_t) + template + void eval(const Indiv& ind) + { + if (Params::image::color) + { + // Convert image to BGR before evaluating + cv::Mat output; + + // Convert HLS into BGR because imwrite uses BGR color space + cv::cvtColor(ind.image(), output, CV_HLS2BGR); + + // Create an empty list to store get 1000 probabilities + _setProbabilityList(output); + } + else // Grayscale + { + // Create an empty list to store get 1000 probabilities + _setProbabilityList(ind.image()); + } + } + + float value(int category) const + { + assert(category < _prob.size()); + return _prob[category]; + } + + float value() const + { + return this->_value; + } + + template + void serialize(Archive & ar, const unsigned int version) { + sferes::fit::Fitness, Exact>::ret>::serialize(ar, version); + ar & BOOST_SERIALIZATION_NVP(_prob); + } + + protected: + std::vector _prob; // List of probabilities + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/gen/evo_float_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/gen/evo_float_image.hpp new file mode 100644 index 000000000..416a9ac21 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/gen/evo_float_image.hpp @@ -0,0 +1,245 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
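FitMapDeepLearning above keeps the whole per-category probability vector as the individual's descriptor (_prob) and uses its maximum as the scalar fitness. A minimal sketch of that rule with Boost.Accumulators, using made-up probabilities:

    #include <boost/accumulators/accumulators.hpp>
    #include <boost/accumulators/statistics/stats.hpp>
    #include <boost/accumulators/statistics/max.hpp>
    #include <cstdio>

    namespace ba = boost::accumulators;

    int main() {
        // made-up per-category probabilities; in the fitness above this vector is
        // also kept verbatim as the MAP-Elites descriptor
        const float prob[] = {0.10f, 0.72f, 0.05f, 0.13f};
        ba::accumulator_set<float, ba::stats<ba::tag::max> > max_acc;
        for (int i = 0; i < 4; ++i)
            max_acc(prob[i]);
        std::printf("fitness = %.2f\n", ba::max(max_acc));    // 0.72
        return 0;
    }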
+ + + + +#ifndef EVO_FLOAT_IMAGE_HPP_ +#define EVO_FLOAT_IMAGE_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace sferes { + namespace gen { + namespace evo_float_image { + enum mutation_t { polynomial = 0, gaussian, uniform }; + enum cross_over_t { recombination = 0, sbx, no_cross_over }; + + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + assert(0); + } + }; + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + assert(0); + } + }; + } + + /// in range [0;1] + template + class EvoFloatImage : + public Float, Exact>::ret> { + public: + typedef Params params_t; + typedef EvoFloatImage this_t; + + EvoFloatImage() {} + + //@{ + void mutate() { + for (size_t i = 0; i < Size; i++) + if (misc::rand() < Params::evo_float_image::mutation_rate) + _mutation_op(*this, i); + _check_invariant(); + } + void cross(const EvoFloatImage& o, EvoFloatImage& c1, EvoFloatImage& c2) { + if (Params::evo_float_image::cross_over_type != evo_float_image::no_cross_over && + misc::rand() < Params::evo_float_image::cross_rate) + _cross_over_op(*this, o, c1, c2); + else if (misc::flip_coin()) { + c1 = *this; + c2 = o; + } else { + c1 = o; + c2 = *this; + } + _check_invariant(); + } + void random() { + BOOST_FOREACH(float &v, this->_data) v = misc::rand(); + _check_invariant(); + } + //@} + + protected: + evo_float_image::Mutation_f _mutation_op; + evo_float_image::CrossOver_f _cross_over_op; + void _check_invariant() const { +#ifdef DBG_ENABLED + BOOST_FOREACH(float p, this->_data) { + assert(!std::isnan(p)); + assert(!std::isinf(p)); + assert(p >= 0 && p <= 1); + } +#endif + } + }; + + // partial specialization for operators + namespace evo_float_image { + // polynomial mutation. Cf Deb 2001, p 124 ; param: eta_m + // perturbation of the order O(1/eta_m) + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float eta_m = Ev::params_t::evo_float_image::eta_m; + assert(eta_m != -1.0f); + float ri = misc::rand(); + float delta_i = ri < 0.5 ? 
+ pow(2.0 * ri, 1.0 / (eta_m + 1.0)) - 1.0 : + 1 - pow(2.0 * (1.0 - ri), 1.0 / (eta_m + 1.0)); + assert(!std::isnan(delta_i)); + assert(!std::isinf(delta_i)); + float f = ev.data(i) + delta_i; + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + + // gaussian mutation + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float sigma = Ev::params_t::evo_float_image::sigma; + float f = ev.data(i) + + misc::gaussian_rand(0, sigma * sigma); + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + // uniform mutation + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float max = Ev::params_t::evo_float_image::max; + float f = ev.data(i) + + misc::rand(max) - max / 2.0f; + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + + // recombination + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + size_t k = misc::rand(f1.size()); + for (size_t i = 0; i < k; ++i) { + c1.data(i, f1.data(i)); + c2.data(i, f2.data(i)); + } + for (size_t i = k; i < f1.size(); ++i) { + c1.data(i, f2.data(i)); + c2.data(i, f1.data(i)); + } + } + }; + + // no cross-over + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + } + }; + + // SBX (cf Deb 2001, p 113) Simulated Binary Crossover + // suggested eta : 15 + /// WARNING : this code is from deb's code (different from the + // article ...) + // A large value ef eta gives a higher probablitity for + // creating a `near-parent' solutions and a small value allows + // distant solutions to be selected as offspring. + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &child1, Ev &child2) { + SFERES_CONST float eta_c = Ev::params_t::evo_float_image::eta_c; + assert(eta_c != -1); + for (unsigned long int i = 0; i < f1.size(); i++) { + float y1 = std::min(f1.data(i), f2.data(i)); + float y2 = std::max(f1.data(i), f2.data(i)); + SFERES_CONST float yl = 0.0; + SFERES_CONST float yu = 1.0; + if (fabs(y1 - y2) > std::numeric_limits::epsilon()) { + float rand = misc::rand(); + float beta = 1.0 + (2.0 * (y1 - yl) / (y2 - y1)); + float alpha = 2.0 - pow(beta, -(eta_c + 1.0)); + float betaq = 0; + if (rand <= (1.0 / alpha)) + betaq = pow((rand * alpha), (1.0 / (eta_c + 1.0))); + else + betaq = pow ((1.0 / (2.0 - rand * alpha)) , (1.0 / (eta_c + 1.0))); + float c1 = 0.5 * ((y1 + y2) - betaq * (y2 - y1)); + beta = 1.0 + (2.0 * (yu - y2) / (y2 - y1)); + alpha = 2.0 - pow(beta, -(eta_c + 1.0)); + if (rand <= (1.0 / alpha)) + betaq = pow ((rand * alpha), (1.0 / (eta_c + 1.0))); + else + betaq = pow ((1.0/(2.0 - rand * alpha)), (1.0 / (eta_c + 1.0))); + float c2 = 0.5 * ((y1 + y2) + betaq * (y2 - y1)); + + c1 = misc::put_in_range(c1, yl, yu); + c2 = misc::put_in_range(c2, yl, yu); + + assert(!std::isnan(c1)); + assert(!std::isnan(c2)); + + if (misc::flip_coin()) { + child1.data(i, c1); + child2.data(i, c2); + } else { + child1.data(i, c2); + child2.data(i, c1); + } + } + } + } + }; + + } //evo_float_image + } // gen +} // sferes + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/imagenet/hen_256.png b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/hen_256.png new file mode 100644 index 000000000..f3218b4ed Binary files /dev/null and b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/hen_256.png differ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/imagenet/image_list.txt b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/image_list.txt new 
file mode 100644 index 000000000..8e462c573 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/image_list.txt @@ -0,0 +1 @@ +/home/anh/workspace/sferes/exp/images/imagenet/hen_256.png 1 \ No newline at end of file diff --git a/modules/dnns_easily_fooled/sferes/exp/images/imagenet/imagenet_deploy_image_memory_data.prototxt b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/imagenet_deploy_image_memory_data.prototxt new file mode 100644 index 000000000..ef5602dff --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/imagenet/imagenet_deploy_image_memory_data.prototxt @@ -0,0 +1,254 @@ +name: "CaffeNet" +layers { + name: "data" + type: IMAGE_DATA + top: "data" + top: "label" + image_data_param { + source: "/home/anh/workspace/sferes/exp/images/imagenet/image_list.txt" + mean_file: "/home/anh/src/caffe/data/ilsvrc12/imagenet_mean.binaryproto" + batch_size: 1 + crop_size: 227 + mirror: false + new_height: 256 + new_width: 256 + } +} +layers { + name: "conv1" + type: CONVOLUTION + bottom: "data" + top: "conv1" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + } +} +layers { + name: "relu1" + type: RELU + bottom: "conv1" + top: "conv1" +} +layers { + name: "pool1" + type: POOLING + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm1" + type: LRN + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv2" + type: CONVOLUTION + bottom: "norm1" + top: "conv2" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + } +} +layers { + name: "relu2" + type: RELU + bottom: "conv2" + top: "conv2" +} +layers { + name: "pool2" + type: POOLING + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "norm2" + type: LRN + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layers { + name: "conv3" + type: CONVOLUTION + bottom: "norm2" + top: "conv3" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + } +} +layers { + name: "relu3" + type: RELU + bottom: "conv3" + top: "conv3" +} +layers { + name: "conv4" + type: CONVOLUTION + bottom: "conv3" + top: "conv4" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu4" + type: RELU + bottom: "conv4" + top: "conv4" +} +layers { + name: "conv5" + type: CONVOLUTION + bottom: "conv4" + top: "conv5" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + } +} +layers { + name: "relu5" + type: RELU + bottom: "conv5" + top: "conv5" +} +layers { + name: "pool5" + type: POOLING + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layers { + name: "fc6" + type: INNER_PRODUCT + bottom: "pool5" + top: "fc6" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu6" + type: RELU + bottom: "fc6" + top: "fc6" +} +layers { + name: "drop6" + type: DROPOUT + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layers 
{ + name: "fc7" + type: INNER_PRODUCT + bottom: "fc6" + top: "fc7" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + inner_product_param { + num_output: 4096 + } +} +layers { + name: "relu7" + type: RELU + bottom: "fc7" + top: "fc7" +} +layers { + name: "drop7" + type: DROPOUT + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layers { + name: "fc8" + type: INNER_PRODUCT + bottom: "fc7" + top: "fc8" + blobs_lr: 1 + blobs_lr: 2 + weight_decay: 1 + weight_decay: 0 + inner_product_param { + num_output: 1000 + } +} +layers { + name: "prob" + type: SOFTMAX + bottom: "fc8" + top: "prob" +} \ No newline at end of file diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/cvmat_serialization.h b/modules/dnns_easily_fooled/sferes/exp/images/phen/cvmat_serialization.h new file mode 100644 index 000000000..8b865eda4 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/cvmat_serialization.h @@ -0,0 +1,64 @@ +/* + * cvmat_serialization.h + * + * Created on: Jul 11, 2014 + * Author: anh + */ + +#ifndef CVMAT_SERIALIZATION_H_ +#define CVMAT_SERIALIZATION_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" + +#include +#include + +BOOST_SERIALIZATION_SPLIT_FREE(cv::Mat) +namespace boost { + namespace serialization { + + /** Serialization support for cv::Mat */ + template + void save(Archive & ar, const cv::Mat& m, const unsigned int version) + { + size_t elem_size = m.elemSize(); + size_t elem_type = m.type(); + + int cols = m.cols; + int rows = m.rows; + + ar & BOOST_SERIALIZATION_NVP(cols); + ar & BOOST_SERIALIZATION_NVP(rows); + ar & BOOST_SERIALIZATION_NVP(elem_size); + ar & BOOST_SERIALIZATION_NVP(elem_type); + + const size_t data_size = cols * rows * elem_size; + ar & boost::serialization::make_array(m.ptr(), data_size); + } + + /** Serialization support for cv::Mat */ + template + void load(Archive & ar, cv::Mat& m, const unsigned int version) + { + int cols, rows; + size_t elem_size, elem_type; + + ar & BOOST_SERIALIZATION_NVP(cols); + ar & BOOST_SERIALIZATION_NVP(rows); + ar & BOOST_SERIALIZATION_NVP(elem_size); + ar & BOOST_SERIALIZATION_NVP(elem_type); + + m.create(rows, cols, elem_type); + + size_t data_size = m.cols * m.rows * elem_size; + ar & boost::serialization::make_array(m.ptr(), data_size); + } + + } +} + + + +#endif /* CVMAT_SERIALIZATION_H_ */ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_color_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_color_image.hpp new file mode 100644 index 000000000..4ad7524b5 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_color_image.hpp @@ -0,0 +1,265 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef PHEN_COLOR_IMAGE_HPP +#define PHEN_COLOR_IMAGE_HPP + +#include +#include + +#include +#include + + +// New stuff added ------------------------------------------ + +#include +#include +#include +#include + +#include "cvmat_serialization.h" // Serialize cv::Mat +#include // Google Logging + +#include "phen_image.hpp" // Base Image class + +// New stuff added ------------------------------------------ + +namespace sferes +{ + namespace phen + { + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(ColorImage, Image) + { + public: + typedef Gen gen_t; + typedef typename gen_t::nn_t gen_nn_t; + SFERES_CONST size_t nb_cppn_inputs = 2 + 2; + SFERES_CONST size_t nb_cppn_outputs = 3; // Red, Green, Blue + + ColorImage():_developed(false) + { + } + + void develop() + { + // Check if phenotype has not been developed + if (!_developed) + { + // Initialize the image to be a white background image + reset_image(); + + this->gen().init(); + // develop the parameters + BGL_FORALL_VERTICES_T(v, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + { + this->gen().get_graph()[v].get_afparams().develop(); + this->gen().get_graph()[v].get_pfparams().develop(); + } + BGL_FORALL_EDGES_T(e, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + this->gen().get_graph()[e].get_weight().develop(); + + assert(nb_cppn_inputs == this->gen().get_nb_inputs()); + assert(nb_cppn_outputs == this->gen().get_nb_outputs()); + + // Change specific color of every pixel in the image + for (int x = 0; x < _image.cols; ++x) + { + for (int y = 0; y < _image.rows; ++y) + { + std::vector output = cppn_value(x, y); // HLS array + + cv::Vec3b color = _image.at(cv::Point(x,y)); + + color[0] = this->convert_to_color_scale(255, output[0]); // H + color[1] = this->convert_to_color_scale(255, output[1]); // L + color[2] = this->convert_to_color_scale(255, output[2]); // S + + _image.at(cv::Point(x,y)) = color; + } + } + + _developed = true; // Raise the flag that this phenotype has been developed. + } + } + + /** + * Programmatically put the patterns in here. 
+ */ + void reset_image() + { + // Paint background : white + _image = cv::Mat(Params::image::size, Params::image::size, CV_8UC3, cv::Scalar(255,255,255)); + } + + double normalize_map_xy_to_grid(const int & r_xyVal, const int & r_numVoxelsXorY) + { + // turn the xth or yth node into its coordinates on a grid from -1 to 1, e.g. x values (1,2,3,4,5) become (-1, -.5 , 0, .5, 1) + // this works with even numbers, and for x or y grids only 1 wide/tall, which was not the case for the original + // e.g. see findCluster for the orignal versions where it was not a funciton and did not work with odd or 1 tall/wide #s + + double coord; + + if (r_numVoxelsXorY==1) coord = 0; + else coord = -1 + ( r_xyVal * 2.0/(r_numVoxelsXorY-1) ); + + return(coord); + } + + std::vector cppn_value(size_t i, size_t j) + { + // Euclidean distance from center + const float xNormalized = normalize_map_xy_to_grid(i, Params::image::size); + const float yNormalized = normalize_map_xy_to_grid(j, Params::image::size); + const float distanceFromCenter = sqrt(pow(double(xNormalized),2.0)+pow(double(yNormalized),2.0)); + + // CPPN inputs + std::vector in(nb_cppn_inputs); + this->gen().init(); + in[0] = i; // x + in[1] = j; // y + in[2] = distanceFromCenter; // distance from center + in[3] = 1.0; // bias + + for (size_t k = 0; k < this->gen().get_depth(); ++k) + this->gen().step(in); + + // Get the CPPN output + std::vector out(nb_cppn_outputs); + out[0] = this->gen().get_outf(0); // Hue + out[1] = this->gen().get_outf(1); // Lightness + out[2] = this->gen().get_outf(2); // Saturation + + return out; + } + + /** + * Convert [-1, 1] range to a color scale + * [0, 255] for Saturation / Brightness or + * [0, 180] for Hue + */ + static int convert_to_color_scale(const int scale, const float value) + { + int color = value * scale; + + if (value < 0) + { + color *= -1; + } + + return color; + } + + void write_png_image(const std::string fileName, const cv::Mat& map) + { + // Read the target bitmap + try + { + cv::Mat output; + + // Convert HLS into BGR because imwrite uses BGR color space + cv::cvtColor(map, output, CV_HLS2BGR); + +// // DEBUGGING +// // Convert to 256x256 +// cv::Size size(256, 256); +// resize(output, output, size); + + // Parameters for cv::imwrite + std::vector write_params; + write_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + write_params.push_back(0); // Fastest writing without compression + + // Write to a file + imwrite(fileName, output, write_params); + } + catch (std::runtime_error& ex) + { + std::cout << "Failed to write image: " << fileName << std::endl; + fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); + } + } + + void log_best_image_fitness(const std::string title) + { + std::vector < std::string > list; + list.push_back (title); + list.push_back (".png"); + const std::string fileName = boost::algorithm::join (list, ""); + + write_png_image(fileName, _image); + + std::cout << "Written to " << title << std::endl; + } + + cv::Mat& image() { + return _image; + } + const cv::Mat& image() const { + return _image; + } + + /** + * Returns image in BGR color space. 
+ */ + void imageBGR(cv::Mat& output) { + // Convert image to BGR before evaluating +// cv::Mat output; + + // Convert HLS into BGR because imwrite uses BGR color space + cv::cvtColor(_image, output, CV_HLS2BGR); + } + + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("phen", DBG_HERE); + sferes::phen::Indiv, Exact>::ret>::serialize(ar, version); + ar & BOOST_SERIALIZATION_NVP(_image); + ar & BOOST_SERIALIZATION_NVP(_developed); + } + + protected: + cv::Mat _image; + bool _developed; + + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image.hpp new file mode 100644 index 000000000..b278750af --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image.hpp @@ -0,0 +1,233 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
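ColorImage above (and GrayscaleImage just below) develops an image by querying the evolved CPPN once per pixel with the pixel coordinates, the distance from the image center and a bias, then mapping each output from [-1, 1] onto a color channel. A compact sketch of that per-pixel loop, with a stand-in function in place of the evolved network:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // stand-in for the evolved CPPN: any smooth function of the inputs will do here
    static float stub_cppn(float x, float y, float d, float bias) {
        return std::sin(3.0f * d) * x * y * bias;              // stays within [-1, 1]
    }

    // same absolute-value mapping as convert_to_color_scale() above
    static int to_color_scale(int scale, float value) {
        int color = static_cast<int>(value * scale);
        return color < 0 ? -color : color;
    }

    int main() {
        const int size = 4;                                    // stand-in for Params::image::size
        std::vector<unsigned char> image(size * size, 0);
        for (int x = 0; x < size; ++x)
            for (int y = 0; y < size; ++y) {
                const float xn = -1.0f + x * 2.0f / (size - 1);  // normalize_map_xy_to_grid()
                const float yn = -1.0f + y * 2.0f / (size - 1);
                const float d  = std::sqrt(xn * xn + yn * yn);
                image[y * size + x] =
                    static_cast<unsigned char>(to_color_scale(255, stub_cppn(xn, yn, d, 1.0f)));
            }
        std::printf("pixel(0,0) = %d\n", image[0]);
        return 0;
    }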
+ + +#ifndef PHEN_GRAYSCALE_IMAGE_HPP +#define PHEN_GRAYSCALE_IMAGE_HPP + +#include +#include "phen_image.hpp" +#include + +#include +#include + + +// New stuff added ------------------------------------------ + +#include +#include +#include +#include + +#include "cvmat_serialization.h" // Serialize cv::Mat +#include // Google Logging + +// New stuff added ------------------------------------------ + +namespace sferes +{ + namespace phen + { + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(GrayscaleImage, Image) + { + public: + typedef Gen gen_t; + typedef typename gen_t::nn_t gen_nn_t; + SFERES_CONST size_t nb_cppn_inputs = Params::dnn::nb_inputs; + SFERES_CONST size_t nb_cppn_outputs = Params::dnn::nb_outputs; // Red, Green, Blue + + GrayscaleImage():_developed(false) + { + } + + void develop() + { + // Check if phenotype has not been developed + if (!_developed) + { + // Initialize the image to be a white background image + reset_image(); + + this->gen().init(); + // develop the parameters + BGL_FORALL_VERTICES_T(v, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + { + this->gen().get_graph()[v].get_afparams().develop(); + this->gen().get_graph()[v].get_pfparams().develop(); + } + BGL_FORALL_EDGES_T(e, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + this->gen().get_graph()[e].get_weight().develop(); + + assert(nb_cppn_inputs == this->gen().get_nb_inputs()); + assert(nb_cppn_outputs == this->gen().get_nb_outputs()); + + // Change specific color of every pixel in the image + for (int x = 0; x < _image.cols; ++x) + { + for (int y = 0; y < _image.rows; ++y) + { + float output = cppn_value(x, y); // Single grayscale value (intensity) + + // Change pixel intensity of grayscale images + // Ref: http://docs.opencv.org/doc/user_guide/ug_mat.html + _image.at(cv::Point(x,y)) = convert_to_color_scale(255, output); + } + } + + _developed = true; // Raise the flag that this phenotype has been developed. + } + } + + /** + * Programmatically put the patterns in here. + */ + void reset_image() + { + // Paint background : black + _image = cv::Mat(Params::image::size, Params::image::size, CV_8UC1, cv::Scalar(0, 0, 0)); + } + + double normalize_map_xy_to_grid(const int & r_xyVal, const int & r_numVoxelsXorY) + { + // turn the xth or yth node into its coordinates on a grid from -1 to 1, e.g. x values (1,2,3,4,5) become (-1, -.5 , 0, .5, 1) + // this works with even numbers, and for x or y grids only 1 wide/tall, which was not the case for the original + // e.g. 
see findCluster for the orignal versions where it was not a funciton and did not work with odd or 1 tall/wide #s + + double coord; + + if (r_numVoxelsXorY==1) coord = 0; + else coord = -1 + ( r_xyVal * 2.0/(r_numVoxelsXorY-1) ); + + return(coord); + } + + float cppn_value(size_t i, size_t j) + { + // Euclidean distance from center + const float xNormalized = normalize_map_xy_to_grid(i, Params::image::size); + const float yNormalized = normalize_map_xy_to_grid(j, Params::image::size); + const float distanceFromCenter = sqrt(pow(double(xNormalized),2.0)+pow(double(yNormalized),2.0)); + + // CPPN inputs + std::vector in(nb_cppn_inputs); + this->gen().init(); + in[0] = i; // x + in[1] = j; // y + in[2] = distanceFromCenter; // distance from center + in[3] = 1.0; // bias + + for (size_t k = 0; k < this->gen().get_depth(); ++k) + this->gen().step(in); + + // Get the CPPN output + return this->gen().get_outf(0); // Grayscale value + } + + /** + * Convert [-1, 1] range to a color scale + * [0, 255] for Saturation / Brightness or + * [0, 180] for Hue + */ + static int convert_to_color_scale(const int scale, const float value) + { + int color = value * scale; + + if (value < 0) + { + color *= -1; + } + + return color; + } + + void write_png_image(const std::string fileName, const cv::Mat& map) + { + // Read the target bitmap + try + { + // Parameters for cv::imwrite + std::vector write_params; + write_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + write_params.push_back(0); // Fastest writing without compression + + // Write to a file + imwrite(fileName, map, write_params); + } + catch (std::runtime_error& ex) + { + std::cout << "Failed to write image: " << fileName << std::endl; + fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); + } + } + + void log_best_image_fitness(const std::string title) + { + std::vector < std::string > list; + list.push_back (title); + list.push_back (".png"); + const std::string fileName = boost::algorithm::join (list, ""); + + write_png_image(fileName, _image); + + std::cout << "Written to " << title << std::endl; + } + + cv::Mat& image() { + return _image; + } + const cv::Mat& image() const { + return _image; + } + + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("phen", DBG_HERE); + sferes::phen::Image, Exact>::ret>::serialize(ar, version); + ar & BOOST_SERIALIZATION_NVP(_image); + ar & BOOST_SERIALIZATION_NVP(_developed); + } + + protected: + cv::Mat _image; + bool _developed; + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image_direct.hpp b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image_direct.hpp new file mode 100644 index 000000000..04c61d430 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_grayscale_image_direct.hpp @@ -0,0 +1,183 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef PHEN_GRAYSCALE_IMAGE_DIRECT_HPP +#define PHEN_GRAYSCALE_IMAGE_DIRECT_HPP + +#include +#include + +#include +#include + + +// New stuff added ------------------------------------------ + +#include +#include +#include +#include + +#include "cvmat_serialization.h" // Serialize cv::Mat +#include // Google Logging + +#include "phen_image.hpp" // Base Image class + +// New stuff added ------------------------------------------ + +namespace sferes +{ + namespace phen + { + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(GrayscaleImageDirect, Image) + { + public: + + GrayscaleImageDirect():_developed(false) + { + } + + void develop() + { + // Check if phenotype has not been developed + if (!_developed) + { + // Initialize the image to be a white background image + reset_image(); + + unsigned i = 0; // Index to access genome + + // Change specific color of every pixel in the image + for (int x = 0; x < _image.cols; ++x) + { + for (int y = 0; y < _image.rows; ++y) + { + float output = this->_gen.data(i); + + // Change pixel intensity of grayscale images + // Ref: http://docs.opencv.org/doc/user_guide/ug_mat.html + _image.at(cv::Point(x,y)) = convert_to_color_scale(255, output); + + ++i; // Move to the next pixel location in genome + } + } + + _developed = true; // Raise the flag that this phenotype has been developed. + } + } + + /** + * Programmatically put the patterns in here. 
+ */ + void reset_image() + { + // Paint background : black + _image = cv::Mat(Params::image::size, Params::image::size, CV_8UC1, cv::Scalar(0, 0, 0)); + } + + /** + * Convert [-1, 1] range to a color scale + * [0, 255] for Saturation / Brightness or + * [0, 180] for Hue + */ + static int convert_to_color_scale(const int scale, const float value) + { + int color = value * scale; + + if (value < 0) + { + color *= -1; + } + + return color; + } + + void write_png_image(const std::string fileName, const cv::Mat& map) + { + // Read the target bitmap + try + { + // Parameters for cv::imwrite + std::vector write_params; + write_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + write_params.push_back(0); // Fastest writing without compression + + // Write to a file + imwrite(fileName, map, write_params); + } + catch (std::runtime_error& ex) + { + std::cout << "Failed to write image: " << fileName << std::endl; + fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); + } + } + + void log_best_image_fitness(const std::string title) + { + std::vector < std::string > list; + list.push_back (title); + list.push_back (".png"); + const std::string fileName = boost::algorithm::join (list, ""); + + write_png_image(fileName, _image); + + std::cout << "Written to " << title << std::endl; + } + + cv::Mat& image() { + return _image; + } + const cv::Mat& image() const { + return _image; + } + + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("phen", DBG_HERE); + sferes::phen::Indiv, Exact>::ret>::serialize(ar, version); + ar & BOOST_SERIALIZATION_NVP(_image); + ar & BOOST_SERIALIZATION_NVP(_developed); + } + + protected: + cv::Mat _image; + bool _developed; + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image.hpp new file mode 100644 index 000000000..93f258d40 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image.hpp @@ -0,0 +1,119 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef PHEN_IMAGE_HPP +#define PHEN_IMAGE_HPP + +#include +#include +#include + +#include +#include + + +// New stuff added ------------------------------------------ + +#include +#include +#include +#include + +#include "cvmat_serialization.h" // Serialize cv::Mat +#include // Google Logging + +//#include // uuid class +#include // generators +#include // streaming operators etc. +#include // serialization + +// New stuff added ------------------------------------------ + +namespace sferes +{ + namespace phen + { + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(Image, Indiv) + { + public: + Image(): _created_gen(0) + { + boost::uuids::uuid uuid = boost::uuids::random_generator()(); + _id = uuid; + } + + /* + * Get the ID of this organism. + */ + boost::uuids::uuid id() + { + return _id; + } + + /* + * Set the generation when this organism is created. + */ + void set_created_gen(const size_t generation) + { + _created_gen = generation; + } + + /* + * Get the generation when this organism is created. + */ + size_t created_gen() const + { + return _created_gen; + } + + + template + void serialize(Archive & ar, const unsigned int version) + { + sferes::phen::Indiv, Exact>::ret>::serialize(ar, version); + ar & boost::serialization::make_nvp("uuid", _id.data); + ar & BOOST_SERIALIZATION_NVP(_created_gen); + } + + protected: + boost::uuids::uuid _id; // The unique id of this organism + size_t _created_gen; // The generation when this image is created + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image_direct.hpp b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image_direct.hpp new file mode 100644 index 000000000..8854e8a63 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/phen/phen_image_direct.hpp @@ -0,0 +1,210 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef PHEN_IMAGE_DIRECT_HPP +#define PHEN_IMAGE_DIRECT_HPP + +#include +#include "phen_image.hpp" +#include + +#include +#include + + +// New stuff added ------------------------------------------ + +#include +#include +#include +#include + +#include "cvmat_serialization.h" // Serialize cv::Mat +#include // Google Logging + +// New stuff added ------------------------------------------ + +namespace sferes +{ + namespace phen + { + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(ImageDirect, Image) + { + public: + + ImageDirect():_developed(false) + { + } + + void develop() + { + // Check if phenotype has not been developed + if (!_developed) + { + // Initialize the image to be a white background image + reset_image(); + + unsigned long int i = 0; // Index to access genome + + // Change specific color of every pixel in the image + for (int x = 0; x < _image.cols; ++x) + { + for (int y = 0; y < _image.rows; ++y) + { + std::vector output; + + // Extract 3 values for H, L, S + for (int v = 0; v < 3; ++v) + { + output.push_back(this->_gen.data(i)); + ++i; // Move to the next color value for the current pixel location in genome + } + + cv::Vec3b color = _image.at(cv::Point(x,y)); + + color[0] = convert_to_color_scale(255, output[0]); // H + color[1] = convert_to_color_scale(255, output[1]); // L + color[2] = convert_to_color_scale(255, output[2]); // S + + _image.at(cv::Point(x,y)) = color; + } + } + + _developed = true; // Raise the flag that this phenotype has been developed. + } + } + + /** + * Programmatically put the patterns in here. 
+ */ + void reset_image() + { + // Paint background : white + _image = cv::Mat(Params::image::size, Params::image::size, CV_8UC3, cv::Scalar(255,255,255)); + } + + /** + * Convert [-1, 1] range to a color scale + * [0, 255] for Saturation / Brightness or + * [0, 180] for Hue + */ + static int convert_to_color_scale(const int scale, const float value) + { + int color = value * scale; + + if (value < 0) + { + color *= -1; + } + + return color; + } + + void write_png_image(const std::string fileName, const cv::Mat& map) + { + // Read the target bitmap + try + { + cv::Mat output; + + // Convert HLS into BGR because imwrite uses BGR color space + cv::cvtColor(map, output, CV_HLS2BGR); + + // Parameters for cv::imwrite + std::vector write_params; + write_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + write_params.push_back(0); // Fastest writing without compression + + // Write to a file + imwrite(fileName, output, write_params); + } + catch (std::runtime_error& ex) + { + std::cout << "Failed to write image: " << fileName << std::endl; + fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what()); + } + } + + void log_best_image_fitness(const std::string title) + { + std::vector < std::string > list; + list.push_back (title); + list.push_back (".png"); + const std::string fileName = boost::algorithm::join (list, ""); + + write_png_image(fileName, _image); + + std::cout << "Written to " << title << std::endl; + } + + cv::Mat& image() { + return _image; + } + const cv::Mat& image() const { + return _image; + } + + /** + * Returns image in BGR color space. + */ + void imageBGR(cv::Mat& output) + { + // Convert image to BGR before evaluating +// cv::Mat output; + + // Convert HLS into BGR because imwrite uses BGR color space + cv::cvtColor(_image, output, CV_HLS2BGR); + +// return output; + } + + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("phen", DBG_HERE); + sferes::phen::Indiv, Exact>::ret>::serialize(ar, version); + ar & BOOST_SERIALIZATION_NVP(_image); + ar & BOOST_SERIALIZATION_NVP(_developed); + } + + protected: + cv::Mat _image; + bool _developed; + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/settings.h b/modules/dnns_easily_fooled/sferes/exp/images/settings.h new file mode 100644 index 000000000..402e40167 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/settings.h @@ -0,0 +1,17 @@ +/* + * settings.h + * + * Created on: Jul 16, 2014 + * Author: anh + */ + +#ifndef SETTINGS_H_ +#define SETTINGS_H_ + + +//#define LOCAL_RUN + +//#define NB_THREADS 16 + + +#endif /* SETTINGS_H_ */ diff --git a/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_image.hpp new file mode 100644 index 000000000..725ea485b --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_image.hpp @@ -0,0 +1,171 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. 
You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef BEST_FIT_IMAGE_ +#define BEST_FIT_IMAGE_ + +#include +#include +#include "image_stat.hpp" +#include +#include + +#include +#include + +// Headers specifics to the computations we need +#include +#include +#include + +#include +#include + +namespace sferes { + namespace stat { + // assume that the population is sorted ! + SFERES_STAT(BestFitImage, ImageStat){ + public: + typedef boost::shared_ptr indiv_t; + typedef std::vector pop_t; + + template + void refresh(const E& ea) { + assert(!ea.pop().empty()); + //_best = *ea.pop().begin(); + + // Create the log file + this->_create_log_file(ea, "bestfit.dat"); + + if (ea.dump_enabled() && ( ea.gen() % Params::pop::dump_period == 0 )) + { + // The data for which we wish to calculate median as the boost accumulator does not. + std::vector< double > data; + + // Calculate all stats: mean, max, min + boost::accumulators::accumulator_set > stats; + + int best_id = 0; + float best_fitness = 0.0f; + int id = 0; + + BOOST_FOREACH(indiv_t i, ea.pop()) + { + // With the stats object in hand, all we need is to push in the data. 
+ float fitness = i->fit().value(); + stats(fitness); + data.push_back(fitness); // Add it to the list for calculating median later + + // Get the best individual by fitness + if (fitness > best_fitness) + { + best_id = id; + best_fitness = fitness; + } + + ++id; + } + + // Best individual in the current pop + _best = ea.pop()[best_id]; + + assert(data.size() == Params::pop::size); // Make sure the list of values is of correct size + double median = sferes::util::Median::calculate_median(data); // Get the mdian + + // Dump best_fit.dat file + (*this->_log_file) << ea.gen() + << " " << median + << " " << boost::accumulators::mean(stats) + << " " << boost::accumulators::max(stats) + << " " << boost::accumulators::min(stats) + << std::endl; + + // Dump best image + if (Params::log::best_image) + { + std::string image_fitness = boost::lexical_cast(_best->fit().value()); + std::string image_gen = boost::lexical_cast(ea.gen()); + std::string image_file = ea.res_dir() + "/" + image_gen + "_" + image_fitness; + + _best->log_best_image_fitness(image_file); + } + + // Save this generation population to a file + // Clear all individuals + _pop.clear(); + + // The mixed population (before selection) + for (size_t i = 0; i < ea.pop().size(); ++i) + { + _pop.push_back(ea.pop()[i]); + } + } + } + void show(std::ostream& os, size_t k) { + _best->develop(); + _best->show(os); + _best->fit().set_mode(fit::mode::view); + _best->fit().eval(*_best); + + } + const boost::shared_ptr best() const { + return _best; + } + + template + void serialize(Archive& ar, const unsigned int version) + { + ar & BOOST_SERIALIZATION_NVP(_pop); + ar & BOOST_SERIALIZATION_NVP(_best); + } + + const pop_t& getPopulation() const + { + return _pop; + } + + protected: + indiv_t _best; + pop_t _pop; + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_map_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_map_image.hpp new file mode 100644 index 000000000..2dde5122e --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/stat/best_fit_map_image.hpp @@ -0,0 +1,162 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef BEST_FIT_MAP_IMAGE_ +#define BEST_FIT_MAP_IMAGE_ + +#include +#include +#include "image_stat.hpp" +#include +#include + +#include +#include + +// Headers specifics to the computations we need +#include +#include +#include + +#include +#include + +namespace sferes { + namespace stat { + // assume that the population is sorted ! + SFERES_STAT(BestFitMapImage, ImageStat){ + + public: + typedef boost::shared_ptr indiv_t; + typedef std::vector pop_t; + + template + void refresh(const E& ea) { + assert(!ea.pop().empty()); + + // Create the log file + this->_create_log_file(ea, "bestfit.dat"); + + if (ea.dump_enabled() && ( ea.gen() % Params::pop::dump_period == 0 )) + { + // The data for which we wish to calculate median as the boost accumulator does not. + std::vector< double > data; + + // Calculate all stats: mean, max, min + boost::accumulators::accumulator_set > stats; + + int best_x = 0; + int best_y = 0; + float best_fitness = 0.0f; + + // Iterate through the map of phenotypes in MAP-Elites + for (int x = 0; x < Params::ea::res_x; ++x) + { + for (int y = 0; y < Params::ea::res_y; ++y) + { + indiv_t i = ea.archive()[x][y]; + + float fitness = i->fit().value(y); // Get the fitness of individual + stats(fitness); // Add it to the stats accumulator + data.push_back(fitness); // Add it to the list for calculating median later + + // Get the best individual by fitness + if (fitness > best_fitness) + { + best_x = x; // Record the best x and y + best_y = y; + best_fitness = fitness; + } + } + } + + // Best individual in the current pop + _best = ea.archive()[best_x][best_y]; + + assert(data.size() == Params::ea::res_y); // Make sure the list of values is of correct size + double median = sferes::util::Median::calculate_median(data); // Get the mdian + + // Dump best_fit.dat file + (*this->_log_file) << ea.gen() + << " " << median + << " " << boost::accumulators::mean(stats) + << " " << boost::accumulators::max(stats) + << " " << ea.jumps() + << " " << boost::accumulators::min(stats) + << std::endl; + + // Dump best image + if (Params::log::best_image) + { + std::string image_fitness = boost::lexical_cast(best_fitness); + std::string image_gen = boost::lexical_cast(ea.gen()); + std::string image_file = ea.res_dir() + "/" + image_gen + "_" + image_fitness; + + _best->log_best_image_fitness(image_file); + } + + } + } + void show(std::ostream& os, size_t k) { + _best->develop(); + _best->show(os); + _best->fit().set_mode(fit::mode::view); + _best->fit().eval(*_best); + } + + const boost::shared_ptr best() const { + return _best; + } + + template + void serialize(Archive& ar, const unsigned int version) + { + ar & BOOST_SERIALIZATION_NVP(_best); + } + + + protected: + indiv_t _best; + + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/stat/image_stat.hpp b/modules/dnns_easily_fooled/sferes/exp/images/stat/image_stat.hpp new file mode 100644 index 000000000..365ed508c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/stat/image_stat.hpp @@ -0,0 +1,70 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef IMAGE_STAT_ +#define IMAGE_STAT_ + +#include +#include +#include +#include +#include + +#include + +namespace sferes { + namespace stat { + // assume that the population is sorted ! + SFERES_STAT(ImageStat, Stat){ + + protected: + template + void _create_log_file(const E& ea, const std::string& name) { + if (!this->_log_file && ea.dump_enabled()) { + + // Create a new file if not exists, append if exists + std::string log = ea.res_dir() + "/" + name; + + // Append if file exists: std::fstream::app + this->_log_file = boost::shared_ptr(new std::ofstream(log.c_str(), std::fstream::app)); + } + } + + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/stat/stat_map_image.hpp b/modules/dnns_easily_fooled/sferes/exp/images/stat/stat_map_image.hpp new file mode 100644 index 000000000..321ef508d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/stat/stat_map_image.hpp @@ -0,0 +1,208 @@ +#ifndef STAT_MAP_IMAGE_HPP +#define STAT_MAP_IMAGE_HPP + +#include +#include +#include +#include +#include + +namespace sferes +{ + namespace stat + { + SFERES_STAT(MapImage, Stat) + { + public: + typedef boost::shared_ptr phen_t; + typedef boost::multi_array array_t; + typedef boost::array point_t; + + MapImage() : _xs(0), _ys(0) {} + template + void refresh(const E& ea) + { + _archive.clear(); + _xs = ea.archive().shape()[0]; + _ys = ea.archive().shape()[1]; + assert(_xs == Params::ea::res_x); + assert(_ys == Params::ea::res_y); + + for (size_t i = 0; i < _xs; ++i) + for (size_t j = 0; j < _ys; ++j) + { + phen_t p = ea.archive()[i][j]; + _archive.push_back(p); + } + + // Report current generation every 10 generations + if (ea.gen() % 10 == 0) + { + std::cout << "gen.. 
" << ea.gen() << std::endl; + } + + if (ea.gen() % Params::pop::dump_period == 0) + { + _write_archive(ea.archive(), ea.parents(), std::string("archive_"), ea, ea.gen()); + + #ifdef MAP_WRITE_PARENTS + _write_parents(ea.archive(), ea.parents(), std::string("parents_"), ea); + #endif + } + } + + const std::vector& getPopulation() const + { + return _archive; + } + + const std::vector& archive() const + { + return _archive; + } + + void show(std::ostream& os, size_t k) + { + std::cerr << "loading "<< k / _ys << "," << k % _ys << std::endl; + if (_archive[k]) + { + _archive[k]->develop(); + _archive[k]->show(os); + _archive[k]->fit().set_mode(fit::mode::view); + _archive[k]->fit().eval(*_archive[k]); + } + else + std::cerr << "Warning, no point here" << std::endl; + } + + template + void serialize(Archive& ar, const unsigned int version) + { + ar & BOOST_SERIALIZATION_NVP(_archive); + ar & BOOST_SERIALIZATION_NVP(_xs); + ar & BOOST_SERIALIZATION_NVP(_ys); + } + + protected: + std::vector _archive; + int _xs, _ys; + + template + void _write_parents(const array_t& array, + const array_t& p_array, + const std::string& prefix, + const EA& ea) const + { + std::cout << "writing..." << prefix << ea.gen() << std::endl; + std::string fname = ea.res_dir() + "/" + + prefix + + boost::lexical_cast< + std::string>(ea.gen()) + + std::string(".dat"); + std::ofstream ofs(fname.c_str()); + for (size_t i = 0; i < _xs; ++i) + for (size_t j = 0; j < _ys; ++j) + if (array[i][j] && p_array[i][j]) + { + point_t p = _get_point(p_array[i][j]); + size_t x = round(p[0] * _xs); + size_t y = round(p[1] * _ys); + ofs << i / (float) _xs + << " " << j / (float) _ys + << " " << p_array[i][j]->fit().value() + << " " << x / (float) _xs + << " " << y / (float) _ys + << " " << array[i][j]->fit().value() + << std::endl; + } + } + + std::string _make_gen_dir(const std::string& res_dir, const int gen) const + { + std::string gen_dir = res_dir + std::string("/map_gen_") + boost::lexical_cast(gen); + boost::filesystem::path my_path(gen_dir); + boost::filesystem::create_directory(my_path); + + return gen_dir; + } + + template + void _write_archive(const array_t& array, + const array_t& p_array, + const std::string& prefix, + const EA& ea, + const int gen) const + { + std::cout << "writing..." << prefix << ea.gen() << std::endl; + std::string fname = ea.res_dir() + "/" + + prefix + + boost::lexical_cast< + std::string>(ea.gen()) + + std::string(".dat"); + + std::ofstream ofs(fname.c_str()); + for (size_t i = 0; i < _xs; ++i) + { + for (size_t j = 0; j < _ys; ++j) + { + if (array[i][j]) + { + float fitness = array[i][j]->fit().value(j); + + ofs + << " " << j // This dimension is categorical (1-1000). No need to normalize to be [0, 1]. 
+ << " " << fitness; + + // CPPN genome info +// << " " << array[i][j]->gen().get_nb_neurons() +// << " " << array[i][j]->gen().get_nb_connections(); + + if (Params::image::record_lineage) + { + ofs << " " << array[i][j]->id(); // Record the id of this organism + + // Only print out the parent if this is a newly created organism + if (array[i][j]->created_gen() == ea.gen()) + { + ofs << " " << p_array[i][j]->id(); // Record the id of this organism's parent + } + } + + ofs << std::endl; // End of line + + bool dump_map = false; + + // Always print the map in the first generation and last generation +// if ( +// (gen == 0 || gen == Params::pop::nb_gen - Params::pop::dump_period) + // Print out only when there is an improvement of 0.1 +// || (fitness - p_array[i][j]->fit().value(j) >= 0.3) +// ) + { + dump_map = true; + } + + // Check if we should print out + if (dump_map) + { + // Create the directory + const std::string gen_dir = _make_gen_dir(ea.res_dir(), gen); + + // Print out images at the current generation + std::string image_gen = boost::lexical_cast(ea.gen()); + std::string category = boost::lexical_cast(j); + std::string image_fitness = boost::lexical_cast(fitness); + std::string image_file = gen_dir + "/map_" + image_gen + "_" + category + "_" + image_fitness; + + array[i][j]->log_best_image_fitness(image_file); + } + } + } + } + } + + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/util/median.hpp b/modules/dnns_easily_fooled/sferes/exp/images/util/median.hpp new file mode 100644 index 000000000..178120703 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/util/median.hpp @@ -0,0 +1,80 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef UTIL_MEDIAN +#define UTIL_MEDIAN + +#include +#include // std::vector + +namespace sferes { + namespace util { + class Median + { + public: + /** + * Calculate the median of the doubles in the given list. + */ + static double calculate_median(std::vector& list) + { + size_t size = list.size(); // Size of the list + + assert(size > 0); + + std::sort(list.begin(), list.end()); + + // If there are an odd number of doubles + if (size % 2 == 1) + { + // Take the middle number + size_t index_middle = (size - 1)/2; + return list[index_middle]; + } + // If there are an even number of doubles + else + { + size_t index_above = size / 2; + size_t index_below = index_above - 1; + + // Average of two middle numbers + return (list[index_above] + list[index_below]) / 2; + } + } + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/exp/images/wscript b/modules/dnns_easily_fooled/sferes/exp/images/wscript new file mode 100644 index 000000000..d4c313817 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/exp/images/wscript @@ -0,0 +1,14 @@ + +#! /usr/bin/env python +def build(bld): + obj = bld.new_task_gen('cxx', 'program') + obj.source = 'dl_map_elites_images_imagenet_direct_encoding.cpp' + #obj.source = 'dl_rank_simple_images_mnist_direct_encoding.cpp' + #obj.source = 'dl_rank_simple_images.cpp' + obj.includes = '. ../../ /usr/local/cuda-6.0/include' + obj.uselib_local = 'sferes2' + obj.uselib = '' + obj.cxxflags = ['-std=c++11'] + obj.target = 'images' + obj.uselib_local = 'sferes2' + obj.lib=['png', 'cudart', 'caffe', 'opencv_core', 'opencv_highgui', 'opencv_imgproc', 'lmdb', 'glog'] diff --git a/modules/dnns_easily_fooled/sferes/install_caffe.sh b/modules/dnns_easily_fooled/sferes/install_caffe.sh new file mode 100755 index 000000000..ce688ed0f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/install_caffe.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Uninstall caffe from Sferes +rm -rf ~/src/sferes/include/caffe/ +rm ~/src/sferes/lib/libcaffe.* +echo "Removed old installation in ~/src/sferes/" + +# Reinstall caffe to Sferes + +# Include files +cp -R ~/src/caffe/include/caffe/ ~/src/sferes/include/ +echo "Installed header files from ~/src/caffe/include/caffe/" + +cp -R ~/src/caffe/build/src/caffe/ ~/src/sferes/include/ +echo "Installed header files from ~/src/caffe/build/src/caffe/" + +# Library files +cp ~/src/caffe/build/lib/libcaffe.* ~/src/sferes/lib/ +echo "Installed library files from ~/src/caffe/build/lib/" + +echo "Done." diff --git a/modules/dnns_easily_fooled/sferes/modules.conf b/modules/dnns_easily_fooled/sferes/modules.conf new file mode 100644 index 000000000..b5291ae81 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules.conf @@ -0,0 +1,3 @@ +nn2 +map_elite + diff --git a/modules/dnns_easily_fooled/sferes/modules/.placeholder b/modules/dnns_easily_fooled/sferes/modules/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/fit_map.hpp b/modules/dnns_easily_fooled/sferes/modules/map_elite/fit_map.hpp new file mode 100644 index 000000000..e256a7ff0 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/fit_map.hpp @@ -0,0 +1,62 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. 
+//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#include + +#define FIT_MAP(Name) SFERES_FITNESS(Name, sferes::fit::FitMap) + +namespace sferes +{ + namespace fit + { + SFERES_FITNESS(FitMap, sferes::fit::Fitness) + { + public: + FitMap() : _desc(2) { } + const std::vector& desc() const { return _desc; } + void set_desc(float x1, float x2) + { + assert(x1 >= 0); + assert(x2 >= 0); + assert(x1 <= 1); + assert(x2 <= 1); + assert(_desc.size() >= 2); + _desc[0] = x1; + _desc[1] = x2; + } + protected: + std::vector _desc; + }; + } +} diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/map_elite.hpp b/modules/dnns_easily_fooled/sferes/modules/map_elite/map_elite.hpp new file mode 100644 index 000000000..7a1e5bb0f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/map_elite.hpp @@ -0,0 +1,284 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef MAP_ELITE_HPP_ +#define MAP_ELITE_HPP_ + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +namespace sferes +{ + namespace ea + { + // Main class + SFERES_EA(MapElite, EaCustom){ +public: + + typedef boost::shared_ptr indiv_t; + + typedef std::vector raw_pop_t; + typedef MapElite this_t; + + typedef typename std::vector pop_t; + typedef typename pop_t::iterator it_t; + typedef typename std::vector > front_t; + typedef boost::array point_t; + typedef boost::shared_ptr phen_t; + typedef boost::multi_array array_t; +// typedef boost::shared_ptr stat_t; + + static const size_t res_x = Params::ea::res_x; + static const size_t res_y = Params::ea::res_y; + + typedef Stat stat_t; + + MapElite() : + _array(boost::extents[res_x][res_y]), + _array_parents(boost::extents[res_x][res_y]), + _jumps(0) + { + } + + void random_pop() + { + // parallel::init(); We are not using TBB + + // Continuing a run + sferes::cont::Continuator continuator; + + bool continue_run = continuator.enabled() || this->_gen_file_path != ""; + + // Continuing a run manually from command line or continuing a run automatically if the job was pre-empted + if(continue_run) + { + // Load the population file + raw_pop_t raw_pop; + if (this->_gen_file_path == "") + { + raw_pop = continuator.getPopulationFromFile(*this); + } + else + { + raw_pop = continuator.getPopulationFromFile(*this, this->_gen_file_path); + } + + // Assign this pop also to the current map + for (size_t i = 0; i < raw_pop.size(); ++i) + { + _add_to_archive(raw_pop[i], raw_pop[i]); + } + + // Get the number of population to continue with + const size_t init_size = raw_pop.size(); + + // Resize the current population archive + this->_pop.resize(init_size); + + // Add loaded individuals to the new population + int i = 0; + BOOST_FOREACH(boost::shared_ptr&indiv, this->_pop) + { + indiv = boost::shared_ptr(new Phen(*raw_pop[i])); + ++i; + } + } + else // Run normally from gen = 0 + { + // Original Map-Elites code + // Intialize a random population + this->_pop.resize(Params::pop::init_size); + BOOST_FOREACH(boost::shared_ptr&indiv, this->_pop) + { + indiv = boost::shared_ptr(new Phen()); + indiv->random(); + } + } + + // Evaluate the initialized population + this->_eval.eval(this->_pop, 0, this->_pop.size()); + BOOST_FOREACH(boost::shared_ptr&indiv, this->_pop) + _add_to_archive(indiv, indiv); + + // Continue a run from a specific generation + if(continue_run) + { + if (this->_gen_file_path == "") + { + continuator.run_with_current_population(*this); + } + else + { + continuator.run_with_current_population(*this, this->_gen_file_path); + 
} + } + } + + //ADDED + void setGen(size_t gen) + { + this->_gen = gen; + } + //ADDED END + + void epoch() + { +#ifdef PHELOGENETIC_TREE + // We start with only 1 organism in order to construct the phylogenetic tree + // Thus, no evolution happening at generation 0 + if (this->_gen == 0) return; +#endif + + this->_pop.clear(); + + for (size_t i = 0; i < res_x; ++i) + { + for (size_t j = 0; j < res_y; ++j) + { + if (_array[i][j]) + { + this->_pop.push_back(_array[i][j]); + } + } + } + + pop_t ptmp, p_parents; + for (size_t i = 0; i < Params::pop::size / 2; ++i) + { + indiv_t p1 = _selection(this->_pop); + indiv_t p2 = _selection(this->_pop); + boost::shared_ptr i1, i2; + p1->cross(p2, i1, i2); + i1->mutate(); + i2->mutate(); + + /* + Phenotypes are to be developed in eval() called below + this->_eval.eval(ptmp, 0, ptmp.size()); + So no need to develop them here. + // i1->develop(); + // i2->develop(); + */ + + // Add the generation when these two new organisms are created (mutated) + i1->set_created_gen(this->_gen); + i2->set_created_gen(this->_gen); + + ptmp.push_back(i1); + ptmp.push_back(i2); + p_parents.push_back(p1); + p_parents.push_back(p2); + } + + this->_eval.eval(ptmp, 0, ptmp.size()); + + assert(ptmp.size() == p_parents.size()); + + for (size_t i = 0; i < ptmp.size(); ++i) + { + _add_to_archive(ptmp[i], p_parents[i]); + } + } + + const array_t& archive() const + { return _array;} + const array_t& parents() const + { return _array_parents;} + + const unsigned long jumps() const + { return _jumps;} + +protected: + array_t _array; + array_t _prev_array; + array_t _array_parents; + unsigned long _jumps; + + bool _add_to_archive(indiv_t i1, indiv_t parent) + { + bool added = false; // Flag raised when the individual is added to the archive in any cell + + // We have a map of 1x1000 for the total of 1000 categories + assert(1 == res_x); + assert(i1->fit().desc().size() == res_y); + + // Compare this individual with every top individual in every cell. + // If this individual is better, replace the current cell occupant with it. 
+ for (int x = 0; x < res_x; ++x) + { + for (int y = 0; y < res_y; ++y) + { + float i1_fitness = i1->fit().value(y); + + if (!_array[x][y] || i1_fitness > _array[x][y]->fit().value(y)) + { + // Replace the current cell occupant with new individual and its parent + _array[x][y] = i1; + _array_parents[x][y] = parent; + + added = true; + + // Record a jump of an indiv to a cell + // One indiv could jump to many cells + _jumps++; + } + } + } + + return added; + } + + indiv_t _selection(const pop_t& pop) + { + int x1 = misc::rand< int > (0, pop.size()); + return pop[x1]; + } + +}; +} +} +#endif + diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/plot_map.py b/modules/dnns_easily_fooled/sferes/modules/map_elite/plot_map.py new file mode 100644 index 000000000..478a73b31 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/plot_map.py @@ -0,0 +1,85 @@ +import sys +import numpy as np + +import matplotlib.pyplot as plt +import matplotlib +from matplotlib.ticker import FuncFormatter + +cdict = {'red': [(0.0, 0.0, 0.0), + (0.33, 0.0, 0.0), + (0.66, 1.0, 1.0), + (1.0, 1.0, 1.0)], + 'blue': [(0.0, 0.0, 0.0), + (0.33, 1.0, 1.0), + (0.66, 0.0, 0.0), + (1.0, 0.0, 0.0)], + 'green': [(0.0, 0.0, 0.0), + (0.33, 0.0, 0.0), + (0.66, 0.0, 0.0), + (1.0, 1.0, 1.0)]} +my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256) + +def scale(x, pos): + 'The two args are the value and tick position' + return '%1.1f' % (x / 100.0) +def scale2(x, pos): + 'The two args are the value and tick position' + return '%1.1f' % (x / 100.0) + + + + + + +size = int(sys.argv[2]) + +x, y, z = np.loadtxt(sys.argv[1]).T + +data = np.zeros((size, size)) +m = 0 +x_m = 0 +y_m = 0 +for i in range(0, len(z)): + data[round(x[i] * size), round(y[i] * size)] = z[i] + if z[i] > m: + x_m = round(x[i] * size) + y_m = round(y[i] * size) + m = z[i] +data = np.ma.masked_where(data == 0, data) + +print "best:"+str(max(z)) + +def load_points(fname): + p_z, p_y, p_x = np.loadtxt(fname).T + p_x *= size + p_y *= size + p_p_x = [] + p_p_y = [] + np_p_x = [] + np_p_y = [] + + for i in range(0, len(p_x)): + if p_z[i] == 1.0: + p_p_x += [p_x[i]] + p_p_y += [p_y[i]] + else: + np_p_x += [p_x[i]] + np_p_y += [p_y[i]] + return p_p_x, p_p_y, np_p_x, np_p_y + + + +fig = plt.figure() +im = plt.imshow(data.T, origin='lower', cmap=my_cmap) +im.set_interpolation('nearest') +fig.subplots_adjust(top=0.98) +cb = plt.colorbar() +for t in cb.ax.get_xticklabels(): + t.set_fontsize(130) + + +ax = fig.add_subplot(111) +ax.yaxis.set_major_formatter(FuncFormatter(scale)) +ax.xaxis.set_major_formatter(FuncFormatter(scale2)) + +plt.savefig('heatmap.pdf') diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/stat_map.hpp b/modules/dnns_easily_fooled/sferes/modules/map_elite/stat_map.hpp new file mode 100644 index 000000000..da31c7347 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/stat_map.hpp @@ -0,0 +1,127 @@ +#ifndef STAT_MAP_HPP_ +#define STAT_MAP_HPP_ + +#include +#include +#include + +namespace sferes +{ + namespace stat + { + SFERES_STAT(Map, Stat) + { + public: + typedef boost::shared_ptr phen_t; + typedef boost::multi_array array_t; + typedef boost::array point_t; + + Map() : _xs(0), _ys(0) {} + template + void refresh(const E& ea) + { + _archive.clear(); + _xs = ea.archive().shape()[0]; + _ys = ea.archive().shape()[1]; + assert(_xs == Params::ea::res_x); + assert(_ys == Params::ea::res_y); + + for (size_t i = 0; i < _xs; ++i) + for (size_t j = 0; j < _ys; ++j) + { + phen_t p = 
ea.archive()[i][j]; + _archive.push_back(p); + } + + if (ea.gen() % Params::pop::dump_period == 0) + { + _write_archive(ea.archive(), std::string("archive_"), ea); +#ifdef MAP_WRITE_PARENTS + _write_parents(ea.archive(), ea.parents(), std::string("parents_"), ea); +#endif + } + } + void show(std::ostream& os, size_t k) + { + std::cerr << "loading "<< k / _ys << "," << k % _ys << std::endl; + if (_archive[k]) + { + _archive[k]->develop(); + _archive[k]->show(os); + _archive[k]->fit().set_mode(fit::mode::view); + _archive[k]->fit().eval(*_archive[k]); + } + else + std::cerr << "Warning, no point here" << std::endl; + } + template + void serialize(Archive& ar, const unsigned int version) + { + ar & BOOST_SERIALIZATION_NVP(_archive); + ar & BOOST_SERIALIZATION_NVP(_xs); + ar & BOOST_SERIALIZATION_NVP(_ys); + } + protected: + std::vector _archive; + int _xs, _ys; + + template + void _write_parents(const array_t& array, + const array_t& p_array, + const std::string& prefix, + const EA& ea) const + { + std::cout << "writing..." << prefix << ea.gen() << std::endl; + std::string fname = ea.res_dir() + "/" + + prefix + + boost::lexical_cast< + std::string>(ea.gen()) + + std::string(".dat"); + std::ofstream ofs(fname.c_str()); + for (size_t i = 0; i < _xs; ++i) + for (size_t j = 0; j < _ys; ++j) + if (array[i][j] && p_array[i][j]) + { + point_t p = _get_point(p_array[i][j]); + size_t x = round(p[0] * _xs); + size_t y = round(p[1] * _ys); + ofs << i / (float) _xs + << " " << j / (float) _ys + << " " << p_array[i][j]->fit().value() + << " " << x / (float) _xs + << " " << y / (float) _ys + << " " << array[i][j]->fit().value() + << std::endl; + } + } + + template + void _write_archive(const array_t& array, + const std::string& prefix, + const EA& ea) const + { + std::cout << "writing..." << prefix << ea.gen() << std::endl; + std::string fname = ea.res_dir() + "/" + + prefix + + boost::lexical_cast< + std::string>(ea.gen()) + + std::string(".dat"); + + std::ofstream ofs(fname.c_str()); + for (size_t i = 0; i < _xs; ++i) + for (size_t j = 0; j < _ys; ++j) + if (array[i][j]) + { + ofs << i / (float) _xs + << " " << j / (float) _ys + << " " << array[i][j]->fit().value() + << std::endl; + } + } + + + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/test_map_elite.cpp b/modules/dnns_easily_fooled/sferes/modules/map_elite/test_map_elite.cpp new file mode 100644 index 000000000..8ad35df3d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/test_map_elite.cpp @@ -0,0 +1,124 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE map_elite + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "map_elite.hpp" +#include "fit_map.hpp" +#include "stat_map.hpp" + +using namespace sferes::gen::evo_float; + + +struct Params +{ + struct ea + { + SFERES_CONST size_t res_x = 256; + SFERES_CONST size_t res_y = 256; + }; + struct pop + { + // number of initial random points + SFERES_CONST size_t init_size = 1000; + // size of a batch + SFERES_CONST size_t size = 2000; + SFERES_CONST size_t nb_gen = 5001; + SFERES_CONST size_t dump_period = 1000; + }; + struct parameters + { + SFERES_CONST float min = -5; + SFERES_CONST float max = 5; + }; + struct evo_float + { + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 10.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + +}; + + +// Rastrigin +FIT_MAP(Rastrigin) +{ + public: + template + void eval(Indiv& ind) + { + float f = 10 * ind.size(); + for (size_t i = 0; i < ind.size(); ++i) + f += ind.data(i) * ind.data(i) - 10 * cos(2 * M_PI * ind.data(i)); + this->_value = -f; + this->set_desc(ind.gen().data(0), ind.gen().data(1)); + } +}; + +//BOOST_AUTO_TEST_CASE(map_elite) +//{ +// using namespace sferes; +// +// typedef Rastrigin fit_t; +// typedef gen::EvoFloat<10, Params> gen_t; +// typedef phen::Parameters phen_t; +// typedef eval::Parallel eval_t; +// typedef boost::fusion::vector, stat::BestFit > stat_t; +// typedef modif::Dummy<> modifier_t; +// typedef ea::MapElite ea_t; +// +// ea_t ea; +// +// ea.run(); +// +//} + + diff --git a/modules/dnns_easily_fooled/sferes/modules/map_elite/wscript b/modules/dnns_easily_fooled/sferes/modules/map_elite/wscript new file mode 100644 index 000000000..0123d4d54 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/map_elite/wscript @@ -0,0 +1,51 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". 
+#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. +#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + +import os + +def set_options(blah) : pass + +def configure(blah): pass + +def build(bld): + print ("Entering directory `" + os.getcwd() + "/modules/'") + test_map_elite = bld.new_task_gen('cxx', 'program') + test_map_elite.source = 'test_map_elite.cpp' + test_map_elite.includes = '. ../../' + test_map_elite.uselib_local = 'sferes2' + test_map_elite.uselib = 'EIGEN3 BOOST BOOST_UNIT_TEST_FRAMEWORK' + test_map_elite.target = 'test_map_elite' + test_map_elite.unit_test = 1 + test_map_elite.cxxflags = ['-std=c++11'] diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/README.md b/modules/dnns_easily_fooled/sferes/modules/nn2/README.md new file mode 100644 index 000000000..f0cf3db2d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/README.md @@ -0,0 +1,34 @@ +nn2 +=== + +** This is a Sferes2 module ** + +NN2 is a generic toolbox for evolving neural networks (it depends on sferes2). + +*If you use this software in an academic article, please cite:* + +Mouret, J.-B. and Doncieux, S. (2012). Encouraging Behavioral Diversity in Evolutionary Robotics: an Empirical Study. Evolutionary Computation. Vol 20 No 1 Pages 91-133. + +### Usage & installation +- copy nn2 to the "modules" directory in the sferes2 root directory. +- add nn2 in modules.conf in the sferes2 root directory +- run ./waf configure and ./waf build + + +### Academic paper that uses nn2 +* Please note that many of these papers extends nn2 * + + + + +- Tonelli, P. and Mouret, J.-B. (2013). On the Relationships between Generative Encodings, Regularity, and Learning Abilities when Evolving Plastic Artificial Neural Networks. PLoS One. Vol 8 No 11 Pages e79138. +- Clune*, J. and Mouret, J.-B. and Lipson, H. (2013). The evolutionary origins of modularity. Proceedings of the Royal Society B. Vol 280 (J. Clune and J.-B. Mouret contributed equally to this work) Pages 20122863. +- Doncieux, S. and Mouret, J.B. (2013). Behavioral Diversity with Multiple Behavioral Distances. Proc. of IEEE Congress on Evolutionary Computation, 2013 (CEC 2013). Pages 1-8. +- Mouret, J.-B. and Doncieux, S. (2012). Encouraging Behavioral Diversity in Evolutionary Robotics: an Empirical Study. Evolutionary Computation. Vol 20 No 1 Pages 91-133. +- Ollion, Charles and Doncieux, Stéphane (2012). Towards Behavioral Consistency in Neuroevolution. 
From Animals to Animats: Proceedings of the 12th International Conference on Adaptive Behaviour (SAB 2012), Springer, publisher. Pages 1-10. +- Ollion, C. and Pinville, T. and Doncieux, S. (2012). With a little help from selection pressures: evolution of memory in robot controllers. Proc. Alife XIII. Pages 1-8. +- Mouret, J.-B. (2011). Novelty-based Multiobjectivization. New Horizons in Evolutionary Robotics: Extended Contributions from the 2009 EvoDeRob Workshop, Springer, publisher. Pages 139--154. +- Pinville, T. and Koos, S. and Mouret, J-B. and Doncieux, S. (2011). How to Promote Generalisation in Evolutionary Robotics: the ProGAb Approach. +GECCO'11: Proceedings of the 13th annual conference on Genetic and evolutionary computation ACM, publisher . Pages 259--266 +- Mouret, J.-B. and Doncieux, S. and Girard, B. (2010). Importing the Computational Neuroscience Toolbox into Neuro-Evolution---Application to Basal Ganglia. GECCO'10: Proceedings of the 12th annual conference on Genetic and evolutionary computation ACM, publisher . Pages 587--594. +- Doncieux, S. and Mouret, J.-B. (2010). Behavioral diversity measures for Evolutionary Robotics. WCCI 2010 IEEE World Congress on Computational Intelligence, Congress on Evolutionary Computation (CEC). Pages 1303--1310. \ No newline at end of file diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/af.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/af.hpp new file mode 100644 index 000000000..2db9d93a0 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/af.hpp @@ -0,0 +1,127 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
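+//
+// The functors below map a neuron's summed potential to its output. As a
+// minimal, illustrative sketch (not a complete program): with the default
+// lambda = 5.0f defined below, an AfTanhNoBias functor computes
+// tanh(5.0f * p), so af(0.3f) returns tanh(1.5f), roughly 0.905. The
+// bias-taking variants (AfTanh, AfSigmoidBias) additionally fold in a single
+// evolved parameter read through trait<P>::single_value(this->_params).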
+ +#ifndef _NN_AF_HPP_ +#define _NN_AF_HPP_ + +#include "params.hpp" + +// classic activation functions +namespace nn +{ + template + class Af + { + public: + typedef P params_t; + const params_t& get_params() const { return _params; } + params_t& get_params() { return _params; } + void set_params(const params_t& params) { _params = params; } + void init() {} + Af() {} + protected: + params_t _params; + }; + + // -1 to +1 sigmoid + template + struct AfTanh : public Af
<P>
+ { + typedef P params_t; + BOOST_STATIC_CONSTEXPR float lambda = 5.0f; + AfTanh() { assert(trait
<P>
::size(this->_params) == 1); } + float operator()(float p) const + { + return tanh(p * lambda + trait
<P>
::single_value(this->_params)); + } + protected: + }; + // -1 to +1 sigmoid + template + struct AfTanhNoBias : public Af
<P>
+ { + typedef params::Dummy params_t; + BOOST_STATIC_CONSTEXPR float lambda = 5.0f; + AfTanhNoBias() { } + float operator()(float p) const + { + return tanh(p * lambda); + } + }; + + + + template + struct AfSigmoidNoBias : public Af<> + { + typedef params::Dummy params_t; + BOOST_STATIC_CONSTEXPR float lambda = 5.0f; + AfSigmoidNoBias() { } + float operator()(float p) const { return 1.0 / (exp(-p * lambda) + 1); } + protected: + }; + + template + struct AfSigmoidBias : public Af
<P>
+ { + typedef P params_t; + BOOST_STATIC_CONSTEXPR float lambda = 5.0f; + AfSigmoidBias() { assert(this->_params.size() == 1); } + float operator()(float p) const + { + return 1.0 / (exp(-p + trait
<P>
::single_value(this->_params) * lambda) + 1); + } + protected: + }; + + // copy input to output + // store an arbitrary parameter + template + struct AfDirect : public Af
<P>
+ { + typedef P params_t; + float operator()(float p) const { return p; } + }; + + // copy input to output + template + struct AfDirectT : public Af + { + typedef params::Dummy params_t; + T operator()(T p) const { return p; } + }; + +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/af_cppn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/af_cppn.hpp new file mode 100644 index 000000000..cbbaf366d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/af_cppn.hpp @@ -0,0 +1,121 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef AF_CPPN_HPP_ +#define AF_CPPN_HPP_ + +#include +#include + +// classic activation functions +namespace nn +{ + namespace cppn + { + enum func_e { sine = 0, sigmoid, gaussian, linear, tanh }; + SFERES_CONST size_t nb_functions = 3; + SFERES_CLASS(AfParams) + { + public: + void set(float t, float p) + { + _type.set_data(0, t); + _param.data(0, p); + } + void mutate() + { + _type.mutate(); + _param.mutate(); + } + void random() + { + _type.random(); + _param.random(); + } + void develop() { + } + int type() const { + return _type.data(0); + } + float param() const { + return _param.data(0); + } + template + void serialize(A& ar, unsigned int v) + { + ar& BOOST_SERIALIZATION_NVP(_type); + ar& BOOST_SERIALIZATION_NVP(_param); + } + protected: + sferes::gen::Sampled<1, Params> _type; + sferes::gen::EvoFloat<1, Params> _param; + }; + } + + // Activation function for Compositional Pattern Producing Networks + template + struct AfCppn : public Af
<P>
+ { + typedef P params_t; + float operator() (float p) const + { + float s = p > 0 ? 1 : -1; + //std::cout<<"type:"<_params.type()<<" p:"<_params.param(), 2))<_params.type()) + { + case cppn::sine: + return sin(p); + case cppn::sigmoid: + return ((1.0 / (1.0 + exp(-p))) - 0.5) * 2.0; + case cppn::gaussian: + return exp(-powf(p, 2)); + case cppn::linear: + return std::min(std::max(p, -3.0f), 3.0f) / 3.0f; + case cppn::tanh: + return tanh(p * 5.0f); + default: + assert(0); + } + return 0; + } + }; + + +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/bench_nn.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/bench_nn.cpp new file mode 100644 index 000000000..b102b1743 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/bench_nn.cpp @@ -0,0 +1,70 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
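+//
+// Quick synthetic benchmark: a fully connected network with 5 inputs,
+// 5 outputs and 100 hidden neurons is stepped 50000 times on a constant
+// input vector. A simple way to compare builds is to time the resulting
+// binary with the standard `time` command; the exact output path depends on
+// the local waf configuration, so it is not hard-coded here.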
+ + +#include "nn.hpp" + +int main() +{ + using namespace nn; + typedef NN, AfTanh >, Connection<> > nn_t; + + nn_t nn; + + BOOST_STATIC_CONSTEXPR size_t nb_io = 5; + BOOST_STATIC_CONSTEXPR size_t nb_h = 100; + + nn.set_nb_inputs(nb_io); + nn.set_nb_outputs(nb_io); + + std::vector neurons; + for (size_t i = 0; i < nb_h; ++i) + neurons.push_back(nn.add_neuron("n")); + + for (size_t i = 0; i < nn.get_nb_inputs(); ++i) + for (size_t j = 0; j < neurons.size(); ++j) + nn.add_connection(nn.get_input(i), neurons[j], 1.0f); + + for (size_t i = 0; i < nn.get_nb_outputs(); ++i) + for (size_t j = 0; j < neurons.size(); ++j) + nn.add_connection(neurons[j], nn.get_output(i), 0.20f); + + std::vector in(nn.get_nb_inputs()); + nn.init(); + std::fill(in.begin(), in.end(), 1.0f); + size_t nb_steps = 50000; + for (size_t i = 0; i < nb_steps; ++i) + nn.step(in); + return 0; +} diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/connection.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/connection.hpp new file mode 100644 index 000000000..9e74646ef --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/connection.hpp @@ -0,0 +1,55 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ +#ifndef _NN_CONNECTiON_HPP +#define _NN_CONNECTiON_HPP + +#include "params.hpp" + +namespace nn +{ + template + struct Connection + { + typedef W weight_t; + typedef IO io_t; + const weight_t& get_weight() const { return _weight; } + weight_t& get_weight() { return _weight; } + void set_weight(const weight_t& w) { _weight = w; } + protected: + weight_t _weight; + }; +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/elman.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/elman.hpp new file mode 100644 index 000000000..841b7237a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/elman.hpp @@ -0,0 +1,135 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef _NN_ELMAN_HPP_ +#define _NN_ELMAN_HPP_ + +#include "nn.hpp" + +namespace nn +{ + // a "modified" Elman network with self-recurrent context units + // E.g. : Training Elman and Jordan networks for system + // identification using genetic algorithms + // Artificial Intelligence in Engineering + // Volume 13, Issue 2, April 1999, Pages 107-117 + // first input is a BIAS input (it should be set to 1) + template + class Elman : public NN + { + public: + typedef nn::NN nn_t; + typedef typename nn_t::io_t io_t; + typedef typename nn_t::vertex_desc_t vertex_desc_t; + typedef typename nn_t::edge_desc_t edge_desc_t; + typedef typename nn_t::adj_it_t adj_it_t; + typedef typename nn_t::graph_t graph_t; + typedef N neuron_t; + typedef C conn_t; + + Elman(size_t nb_inputs, + size_t nb_hidden, + size_t nb_outputs) + { + // neurons + this->set_nb_inputs(nb_inputs + 1); + this->set_nb_outputs(nb_outputs); + for (size_t i = 0; i < nb_hidden; ++i) + _hidden_neurons. 
+ push_back(this->add_neuron(std::string("h") + + boost::lexical_cast(i))); + for (size_t i = 0; i < nb_hidden; ++i) + _context_neurons. + push_back(this->add_neuron(std::string("c") + + boost::lexical_cast(i))); + // connections + this->full_connect(this->_inputs, this->_hidden_neurons, + trait::zero()); + this->full_connect(this->_hidden_neurons, this->_outputs, + trait::zero()); + this->connect(this->_hidden_neurons, this->_context_neurons, + trait::zero()); + this->connect(this->_context_neurons, this->_context_neurons, + trait::zero()); + this->full_connect(this->_context_neurons, this->_hidden_neurons, + trait::zero()); + // bias + // (hidden layer is already connect to input(0)) + size_t last = this->get_nb_inputs(); + for (size_t i = 0; i < _context_neurons.size(); ++i) + this->add_connection(this->get_input(last), _context_neurons[i], + trait::zero()); + for (size_t i = 0; i < this->get_nb_outputs(); ++i) + this->add_connection(this->get_input(last), this->get_output(i), + trait::zero()); + } + unsigned get_nb_inputs() const { return this->_inputs.size() - 1; } + void step(const std::vector& in) + { + assert(in.size() == this->get_nb_inputs()); + std::vector inf = in; + inf.push_back(1.0f); + nn_t::_step(inf); + } + protected: + + std::vector _hidden_neurons; + std::vector _context_neurons; + + }; + namespace elman + { + template + struct Count + { + SFERES_CONST int nb_inputs = NbInputs + 1; // bias is an input + SFERES_CONST int nb_outputs = NbOutputs; + SFERES_CONST int nb_hidden = NbHidden; + SFERES_CONST int nb_params = + nb_inputs * nb_hidden // input to hidden (full) + + nb_hidden * nb_outputs // hidden to output (full) + + nb_hidden // hidden to context (1-1) + + nb_hidden // context to itself (1-1) + + nb_hidden * nb_hidden // context to hidden (full) + + nb_hidden // bias context + + nb_outputs; // bias outputs + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn.hpp new file mode 100644 index 000000000..79f25d1e7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn.hpp @@ -0,0 +1,421 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef DNN_HPP_ +#define DNN_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nn.hpp" +#include "trait.hpp" + +namespace sferes +{ + namespace gen + { + + template + typename boost::graph_traits::vertex_descriptor + random_vertex(Graph& g) + { + assert(num_vertices(g)); + using namespace boost; + if (num_vertices(g) > 1) + { + std::size_t n = misc::rand(num_vertices(g)); + typename graph_traits::vertex_iterator i = vertices(g).first; + while (n-- > 0) ++i; + return *i; + } + else + return *vertices(g).first; + } + + template + typename boost::graph_traits::edge_descriptor + random_edge(Graph& g) + { + assert(num_edges(g)); + using namespace boost; + if (num_edges(g) > 1) + { + std::size_t n = misc::rand(num_edges(g)); + typename graph_traits::edge_iterator i = edges(g).first; + while (n-- > 0) ++i; + return *i; + } + else + return *edges(g).first; + } + + namespace dnn + { + enum init_t { ff = 0, random_topology }; + } + template + class Dnn : public nn::NN + { + public: + typedef nn::NN nn_t; + typedef N neuron_t; + typedef C conn_t; + typedef typename nn_t::io_t io_t; + typedef typename nn_t::weight_t weight_t; + typedef typename nn_t::vertex_desc_t vertex_desc_t; + typedef typename nn_t::edge_desc_t edge_desc_t; + typedef typename nn_t::adj_it_t adj_it_t; + typedef typename nn_t::graph_t graph_t; + void random() + { + if (Params::dnn::init == dnn::ff) + _random_ff(Params::dnn::nb_inputs, Params::dnn::nb_outputs); + else + _random(Params::dnn::nb_inputs, Params::dnn::nb_outputs, + Params::dnn::min_nb_neurons, Params::dnn::max_nb_neurons, + Params::dnn::min_nb_conns, Params::dnn::max_nb_conns); + } + + void mutate() + { + _change_conns(); + + _change_neurons(); + + if (misc::rand() < Params::dnn::m_rate_add_conn) + _add_conn_nodup(); + + if (misc::rand() < Params::dnn::m_rate_del_conn) + _del_conn(); + + if (misc::rand() < Params::dnn::m_rate_add_neuron) + _add_neuron_on_conn(); + + if (misc::rand() < Params::dnn::m_rate_del_neuron) + _del_neuron(); + + } + void cross(const Dnn& o, Dnn& c1, Dnn& c2) + { +#ifdef PHELOGENETIC_TREE + c1 = *this; + c2 = o; +#else + if (misc::flip_coin()) + { + c1 = *this; + c2 = o; + } + else + { + c2 = *this; + c1 = o; + } +#endif + } + // serialize the graph "by hand"... 
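+ // Sketch of the scheme used by save()/load() below: every vertex gets an
+ // index in traversal order; per-neuron parameters (afparams/pfparams) are
+ // stored in that order, inputs/outputs as lists of indices, and every edge
+ // as an (index, index) pair plus its weight. load() recreates the i/o and
+ // hidden neurons first, then replays the connection list, so the rebuilt
+ // graph matches the saved one even though vertex descriptors differ.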
+ template + void save(Archive& a, const unsigned v) const + { + dbg::trace("nn", DBG_HERE); + std::vector inputs; + std::vector outputs; + std::vector afparams; + std::vector pfparams; + std::map nmap; + std::vector > conns; + std::vector weights; + + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + { + if (this->is_input(v)) + inputs.push_back(afparams.size()); + if (this->is_output(v)) + outputs.push_back(afparams.size()); + nmap[v] = afparams.size(); + afparams.push_back(this->_g[v].get_afparams()); + pfparams.push_back(this->_g[v].get_pfparams()); + } + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + { + conns.push_back(std::make_pair(nmap[source(e, this->_g)], + nmap[target(e, this->_g)])); + weights.push_back(this->_g[e].get_weight()); + } + assert(pfparams.size() == afparams.size()); + assert(weights.size() == conns.size()); + + a & BOOST_SERIALIZATION_NVP(afparams); + a & BOOST_SERIALIZATION_NVP(pfparams); + a & BOOST_SERIALIZATION_NVP(weights); + a & BOOST_SERIALIZATION_NVP(conns); + a & BOOST_SERIALIZATION_NVP(inputs); + a & BOOST_SERIALIZATION_NVP(outputs); + } + template + void load(Archive& a, const unsigned v) + { + dbg::trace("nn", DBG_HERE); + std::vector inputs; + std::vector outputs; + std::vector afparams; + std::vector pfparams; + std::map nmap; + std::vector > conns; + std::vector weights; + + a & BOOST_SERIALIZATION_NVP(afparams); + a & BOOST_SERIALIZATION_NVP(pfparams); + a & BOOST_SERIALIZATION_NVP(weights); + a & BOOST_SERIALIZATION_NVP(conns); + a & BOOST_SERIALIZATION_NVP(inputs); + a & BOOST_SERIALIZATION_NVP(outputs); + + assert(pfparams.size() == afparams.size()); + + assert(weights.size() == conns.size()); + this->set_nb_inputs(inputs.size()); + this->set_nb_outputs(outputs.size()); + for (size_t i = 0; i < this->get_nb_inputs(); ++i) + nmap[inputs[i]] = this->get_input(i); + for (size_t i = 0; i < this->get_nb_outputs(); ++i) + nmap[outputs[i]] = this->get_output(i); + + for (size_t i = 0; i < afparams.size(); ++i) + if (std::find(inputs.begin(), inputs.end(), i) == inputs.end() + && std::find(outputs.begin(), outputs.end(), i) == outputs.end()) + nmap[i] = this->add_neuron("n", pfparams[i], afparams[i]); + else + { + this->_g[nmap[i]].set_pfparams(pfparams[i]); + this->_g[nmap[i]].set_afparams(afparams[i]); + } + + + //assert(nmap.size() == num_vertices(this->_g)); + for (size_t i = 0; i < conns.size(); ++i) + this->add_connection(nmap[conns[i].first], nmap[conns[i].second], weights[i]); + } + BOOST_SERIALIZATION_SPLIT_MEMBER(); + + protected: + void _random_neuron_params() + { + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + { + this->_g[v].get_pfparams().random(); + this->_g[v].get_afparams().random(); + } + } + // we start with a fully connected 0-layer perceptron with + // random weights + void _random_ff(size_t nb_inputs, size_t nb_outputs) + { + this->set_nb_inputs(nb_inputs); + this->set_nb_outputs(nb_outputs); + + BOOST_FOREACH(vertex_desc_t& i, this->_inputs) + BOOST_FOREACH(vertex_desc_t& o, this->_outputs) + this->add_connection(i, o, _random_weight()); + + _random_neuron_params(); + } + + void _random(size_t nb_inputs, size_t nb_outputs, + size_t min_nb_neurons, size_t max_nb_neurons, + size_t min_nb_conns, size_t max_nb_conns) + { + // io + this->set_nb_inputs(nb_inputs); + this->set_nb_outputs(nb_outputs); + _random_neuron_params(); + + // neurons + size_t nb_neurons = misc::rand(min_nb_neurons, max_nb_neurons); + for (size_t i = 0; i < nb_neurons; ++i) + _add_neuron();//also call the random params + + // conns + size_t nb_conns = 
misc::rand(min_nb_conns, max_nb_conns); + for (size_t i = 0; i < nb_conns; ++i) + _add_conn_nodup(); + + this->simplify(); + } + + vertex_desc_t _random_tgt() + { + vertex_desc_t v; + do + v = random_vertex(this->_g); + while (this->is_input(v)); + return v; + } + vertex_desc_t _random_src() + { + vertex_desc_t v; + do + v = random_vertex(this->_g); + while (this->is_output(v)); + return v; + } + + vertex_desc_t _add_neuron() + { + vertex_desc_t v = this->add_neuron("n"); + this->_g[v].get_pfparams().random(); + this->_g[v].get_afparams().random(); + return v; + } + + vertex_desc_t _add_neuron_on_conn() + { + if (!num_edges(this->_g)) + return (vertex_desc_t)0x0; + edge_desc_t e = random_edge(this->_g); + vertex_desc_t src = source(e, this->_g); + vertex_desc_t tgt = target(e, this->_g); + typename nn_t::weight_t w = this->_g[e].get_weight(); + vertex_desc_t n = this->add_neuron("n"); + this->_g[n].get_pfparams().random(); + this->_g[n].get_afparams().random(); + // + remove_edge(e, this->_g); + this->add_connection(src, n, w);// todo : find a kind of 1 ?? + this->add_connection(n, tgt, w); + return n; + } + + void _del_neuron() + { + assert(num_vertices(this->_g)); + + if (this->get_nb_neurons() <= this->get_nb_inputs() + this->get_nb_outputs()) + return; + vertex_desc_t v; + do + v = random_vertex(this->_g); + while (this->is_output(v) || this->is_input(v)); + + clear_vertex(v, this->_g); + remove_vertex(v, this->_g); + } + typename nn_t::weight_t _random_weight() + { + typename nn_t::weight_t w; + w.random(); + return w; + } + void _add_conn() + { + this->add_connection(_random_src(), _random_tgt(), _random_weight()); + } + // add a random connection by avoiding to duplicate an existent connection + void _add_conn_nodup() + { + vertex_desc_t src, tgt; + // this is only an upper bound; a connection might of course + // be possible even after max_tries tries. 
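+ // (num_vertices squared is the number of ordered (src, tgt) pairs, so it is
+ // a cheap, if loose, cap on how many random draws are attempted.)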
+ size_t max_tries = num_vertices(this->_g) * num_vertices(this->_g), + nb_tries = 0; + do + { + src = _random_src(); + tgt = _random_tgt(); + } + while (is_adjacent(this->_g, src, tgt) && ++nb_tries < max_tries); + if (nb_tries < max_tries) + { + typename nn_t::weight_t w; + w.random(); + this->add_connection(src, tgt, w); + } + } + void _del_conn() + { + if (!this->get_nb_connections()) + return; + remove_edge(random_edge(this->_g), this->_g); + } + void _change_neurons() + { + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + { + this->_g[v].get_afparams().mutate(); + this->_g[v].get_pfparams().mutate(); + } + } + + // No dup version + void _change_conns() + { + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + this->_g[e].get_weight().mutate(); + + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + if (misc::rand() < Params::dnn::m_rate_change_conn) + { + vertex_desc_t src = source(e, this->_g); + vertex_desc_t tgt = target(e, this->_g); + typename nn_t::weight_t w = this->_g[e].get_weight(); + remove_edge(e, this->_g); + int max_tries = num_vertices(this->_g) * num_vertices(this->_g), + nb_tries = 0; + if (misc::flip_coin()) + do + src = _random_src(); + while(++nb_tries < max_tries && is_adjacent(this->_g, src, tgt)); + else + do + tgt = _random_tgt(); + while(++nb_tries < max_tries && is_adjacent(this->_g, src, tgt)); + if (nb_tries < max_tries) + this->add_connection(src, tgt, w); + return; + } + } + }; +} +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn_ff.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn_ff.hpp new file mode 100644 index 000000000..9577be28d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_dnn_ff.hpp @@ -0,0 +1,211 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef DNN_FF_HPP_ +#define DNN_FF_HPP_ + +#include +#include +#include +#include +#include +#include + +namespace sferes +{ + namespace gen + { + template + class DnnFF : public Dnn + { + public: + typedef nn::NN nn_t; + typedef N neuron_t; + typedef C conn_t; + typedef typename nn_t::io_t io_t; + typedef typename nn_t::vertex_desc_t vertex_desc_t; + typedef typename nn_t::edge_desc_t edge_desc_t; + typedef typename nn_t::graph_t graph_t; + DnnFF() {} + DnnFF& operator=(const DnnFF& o) + { + static_cast& >(*this) + = static_cast& >(o); + return *this; + } + DnnFF(const DnnFF& o) + { *this = o; } + void init() + { + Dnn::init(); + _compute_depth(); + } + void random() + { + assert(Params::dnn::init == dnn::ff); + this->_random_ff(Params::dnn::nb_inputs, Params::dnn::nb_outputs); + _make_all_vertices(); + } + void mutate() + { + _change_conns(); + this->_change_neurons(); + + if (misc::rand() < Params::dnn::m_rate_add_conn) + _add_conn(); + + if (misc::rand() < Params::dnn::m_rate_del_conn) + this->_del_conn(); + + if (misc::rand() < Params::dnn::m_rate_add_neuron) + this->_add_neuron_on_conn(); + + if (misc::rand() < Params::dnn::m_rate_del_neuron) + this->_del_neuron(); + } + + void cross(const DnnFF& o, DnnFF& c1, DnnFF& c2) + { + if (misc::flip_coin()) + { + c1 = *this; + c2 = o; + } + else + { + c2 = *this; + c1 = o; + } + } + size_t get_depth() const { return _depth; } + protected: + std::set _all_vertices; + size_t _depth; + + void _make_all_vertices() + { + _all_vertices.clear(); + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + _all_vertices.insert(v); + } + void _change_conns() + { + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + this->_g[e].get_weight().mutate(); + } + + + // add only feed-forward connections + void _add_conn() + { + using namespace boost; + vertex_desc_t v = this->_random_src(); + std::set preds; + nn::bfs_pred_visitor vis(preds); + breadth_first_search(make_reverse_graph(this->_g), + v, color_map(get(&N::_color, this->_g)).visitor(vis)); + _make_all_vertices(); + std::set tmp, avail, in; + // avoid to connect to predecessors + std::set_difference(_all_vertices.begin(), _all_vertices.end(), + preds.begin(), preds.end(), + std::insert_iterator >(tmp, tmp.begin())); + // avoid to connect to inputs + BOOST_FOREACH(vertex_desc_t v, this->_inputs) // inputs need + // to be sorted + in.insert(v); + std::set_difference(tmp.begin(), tmp.end(), + in.begin(), in.end(), + std::insert_iterator >(avail, avail.begin())); + + if (avail.empty()) + return; + vertex_desc_t tgt = *misc::rand_l(avail); + typename nn_t::weight_t w; + w.random(); + this->add_connection(v, tgt, w); + } + + // useful to make the right number of steps + void _compute_depth() + { + using namespace boost; + typedef std::map int_map_t; + typedef std::map vertex_map_t; + typedef std::map color_map_t; + typedef std::map edge_map_t; + + typedef associative_property_map a_map_t; + typedef associative_property_map c_map_t; + typedef associative_property_map v_map_t; + typedef associative_property_map e_map_t; + + color_map_t cm; c_map_t cmap(cm); + vertex_map_t vm; v_map_t pmap(vm); + edge_map_t em; + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + em[e] = 1; + e_map_t wmap(em); + _depth = 0; + // we compute the longest path between inputs and outputs + BOOST_FOREACH(vertex_desc_t s, this->_inputs) + { + int_map_t im; a_map_t dmap(im); + dag_shortest_paths + 
(this->_g, s, dmap, wmap, cmap, pmap, + dijkstra_visitor(), + std::greater(), + closed_plus(), + std::numeric_limits::min(), 0); + + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + { + size_t d = get(dmap, v); + if (this->_g[v].get_out() != -1 && d <= num_vertices(this->_g)) + _depth = std::max(_depth, d); + } + } + // add one to be sure + _depth ++; + } + + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/gen_hyper_nn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_hyper_nn.hpp new file mode 100644 index 000000000..68dfdb88e --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/gen_hyper_nn.hpp @@ -0,0 +1,83 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
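+//
+// HyperNn below is a feed-forward DnnFF genotype whose neurons use the CPPN
+// activation set (AfCppn) and whose weights and per-neuron parameters are
+// "developed" in init() before evaluation. It is the CPPN genome that a
+// HyperNEAT-style substrate would query; the substrate mapping itself is
+// presumably defined in the experiment code rather than in this header.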
+ + + + +#ifndef GEN_HYPER_NN_HPP_ +#define GEN_HYPER_NN_HPP_ + +#include "neuron.hpp" +#include "pf.hpp" + +#include "gen_dnn_ff.hpp" +#include "af_cppn.hpp" + +namespace sferes +{ + namespace gen + { + template + class HyperNn : public DnnFF, + nn::AfCppn > >, + nn::Connection, + Params> + { + public: + typedef DnnFF, + nn::AfCppn > >, + nn::Connection, + Params> nn_t; + typedef typename nn_t::neuron_t neuron_t; + typedef typename nn_t::conn_t conn_t; + void init() + { + BGL_FORALL_EDGES_T(e, this->get_graph(), + typename nn_t::graph_t) + this->get_graph()[e].get_weight().develop(); + + // develop the parameters + BGL_FORALL_VERTICES_T(v, this->get_graph(), + typename nn_t::graph_t) + { + this->get_graph()[v].get_afparams().develop(); + this->get_graph()[v].get_pfparams().develop(); + } + nn_t::init(); + } + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/io_trait.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/io_trait.hpp new file mode 100644 index 000000000..58b3d7408 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/io_trait.hpp @@ -0,0 +1,111 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
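+//
+// io_trait<IO> centralizes how a "zero" signal and a signal buffer are built
+// for a given coupling type: the generic version returns a scalar zero and
+// uses std::valarray<T> as its buffer type, the std::pair specialization
+// zeroes both members, and with EIGEN3_ENABLED the float specialization
+// switches the buffer to Eigen::VectorXf so zero(k) yields a k-length zero
+// vector.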
+ + + + +#ifndef _IO_TRAIT_HPP_ +#define _IO_TRAIT_HPP_ + +// for std::pair +#include +#include + +#ifdef EIGEN3_ENABLED +#include +#endif + +namespace nn +{ + // io trait + template + struct io_trait + { + static T zero() { return T(0.0f); } + static T zero(size_t k) { return zero(); } + typedef std::valarray vector_t; + }; + +#ifdef EIGEN3_ENABLED + // go with eigen with float (TODO : double) + template<> + struct io_trait + { + typedef Eigen::VectorXf vector_t; + static float zero() { return 0.0f; } + static vector_t zero(size_t k) { return Eigen::VectorXf::Zero(k); } + + }; +#endif + + template<> + struct io_trait > + { + static std::pair zero() { return std::make_pair(0.0f, 0.0f); } + typedef std::valarray > vector_t; + }; + + // useful but wrong place (?) + template + std::basic_ostream<_CharT, _Traits>& + operator<<(std::basic_ostream<_CharT, _Traits>& ofs, const std::pair& p) + { + return ofs< + std::basic_ostream<_CharT, _Traits>& + operator<<(std::basic_ostream<_CharT, _Traits>& ofs, const std::vector& p) + { + for (size_t i = 0; i < p.size(); ++i) + ofs< + std::istream& operator>>(std::istream& ifs, std::pair& p) + { + T1 t1; + T2 t2; + ifs >> t1; + ifs >> t2; + return std::make_pair(t1, t2); + } + + +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/mlp.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/mlp.hpp new file mode 100644 index 000000000..8ca01a79e --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/mlp.hpp @@ -0,0 +1,100 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
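+//
+// Minimal usage sketch for the mlp_t typedef defined at the end of this file
+// (illustrative only; the weight values and step count are arbitrary):
+//
+//   nn::mlp_t nn(2, 3, 1);           // 2 inputs (+ automatic bias), 3 hidden, 1 output
+//   std::vector<float> w(13, 0.1f);  // (2+1)*3 + 3*1 + 1 = 13 connection weights
+//   nn.set_all_weights(w);
+//   nn.init();
+//   std::vector<float> in(2, 0.5f);
+//   for (int t = 0; t < 3; ++t)      // a few steps so activity reaches the output layer
+//     nn.step(in);
+//   float out = nn.get_outf(0);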
+ + + + +#ifndef _NN_MLP_HPP_ +#define _NN_MLP_HPP_ + +#include "nn.hpp" +#include "connection.hpp" +#include "neuron.hpp" + +namespace nn +{ + // a basic multi-layer perceptron (feed-forward neural network) + // only one hidden layer in this version + // there's one autmatically added input for the bias + template + class Mlp : public NN + { + public: + typedef nn::NN nn_t; + typedef typename nn_t::io_t io_t; + typedef typename nn_t::vertex_desc_t vertex_desc_t; + typedef typename nn_t::edge_desc_t edge_desc_t; + typedef typename nn_t::adj_it_t adj_it_t; + typedef typename nn_t::graph_t graph_t; + typedef N neuron_t; + typedef C conn_t; + + Mlp(size_t nb_inputs, + size_t nb_hidden, + size_t nb_outputs) + { + // neurons + this->set_nb_inputs(nb_inputs + 1); + this->set_nb_outputs(nb_outputs); + for (size_t i = 0; i < nb_hidden; ++i) + _hidden_neurons. + push_back(this->add_neuron(std::string("h") + boost::lexical_cast(i))); + // connections + this->full_connect(this->_inputs, this->_hidden_neurons, + trait::zero()); + this->full_connect(this->_hidden_neurons, this->_outputs, + trait::zero()); + // bias outputs too + for (size_t i = 0; i < nb_outputs; ++i) + this->add_connection(this->get_input(nb_inputs), this->get_output(i), + trait::zero()); + } + unsigned get_nb_inputs() const { return this->_inputs.size() - 1; } + void step(const std::vector& in) + { + assert(in.size() == this->get_nb_inputs()); + std::vector inf = in; + inf.push_back(1.0f); + nn_t::_step(inf); + } + protected: + std::vector _hidden_neurons; + }; + + // a basic MLP with float weights + typedef Mlp, AfSigmoidNoBias<> >, Connection<> > mlp_t; + +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/neuron.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/neuron.hpp new file mode 100644 index 000000000..03f419ae9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/neuron.hpp @@ -0,0 +1,157 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef NN_NEURON_HPP +#define NN_NEURON_HPP + +#include + +#include "trait.hpp" + +namespace nn +{ + + // generic neuron + // Pot : potential functor (see pf.hpp) + // Act : activation functor (see af.hpp) + // IO : type of coupling between "neurons" (float or std::pair) + template + class Neuron + { + public: + typedef typename Pot::weight_t weight_t; + typedef IO io_t; + typedef Pot pf_t; + typedef Act af_t; + static io_t zero() { return trait::zero(); } + Neuron() : + _current_output(zero()), + _next_output(zero()), + _fixed(false), + _in(-1), + _out(-1) + {} + bool get_fixed() const { return _fixed; } + void set_fixed(bool b = true) { _fixed = b; } + io_t activate() + { + if (!_fixed) + _next_output = _af(_pf(_inputs)); + return _next_output; + } + + void init() + { + _pf.init(); + _af.init(); + if (get_in_degree() != 0) + _inputs = trait::zero(get_in_degree()); + _current_output = zero(); + _next_output = zero(); + } + + void set_input(unsigned i, const io_t& in) { assert(i < _inputs.size()); _inputs[i] = in; } + + void set_weight(unsigned i, const weight_t& w) { _pf.set_weight(i, w); } + + typename af_t::params_t& get_afparams() { return _af.get_params(); } + typename pf_t::params_t& get_pfparams() { return _pf.get_params(); } + const typename af_t::params_t& get_afparams() const { return _af.get_params(); } + const typename pf_t::params_t& get_pfparams() const { return _pf.get_params(); } + void set_afparams(const typename af_t::params_t& p) { _af.set_params(p); } + void set_pfparams(const typename pf_t::params_t& p) { _pf.set_params(p); } + + void step() { _current_output = _next_output; } + void set_in_degree(unsigned k) + { + _pf.set_nb_weights(k); + _inputs.resize(k); + if (k == 0) + return; + _inputs = trait::zero(k); + } + unsigned get_in_degree() const { return _pf.get_weights().size(); } + + // for input neurons + void set_current_output(const io_t& v) { _current_output = v; } + void set_next_output(const io_t& v) { _next_output = v; } + + // standard output + const io_t& get_current_output() const { return _current_output; } + + // next output + const io_t& get_next_output() const { return _next_output; } + + // i/o + int get_in() const { return _in; } + void set_in(int i) { _in = i; } + int get_out() const { return _out; } + void set_out(int o) { _out = o; } + bool is_input() const { return _in != -1; } + bool is_output() const { return _out != -1; } + + const Pot& get_pf() const { return _pf; } + Pot& get_pf() { return _pf; } + + const Act& get_af() const { return _af; } + Act& get_af() { return _af; } + + void set_id(const std::string& s) { _id = s; } + const std::string& get_id() const { return _id; } + const std::string& get_label() const { return _label; } + + // for graph algorithms + std::string _id; + std::string _label; + boost::default_color_type _color; + int _index; + protected: + // activation functor + Act _af; + // potential functor + Pot _pf; + // outputs + io_t _current_output; + io_t _next_output; + // cache + typename trait::vector_t _inputs; + // fixed = current_output is constant + bool _fixed; + // -1 if not an input 
of the nn, id of input otherwise + int _in; + // -1 if not an output of the nn, id of output otherwise + int _out; + }; +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/nn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/nn.hpp new file mode 100644 index 000000000..fd704fa34 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/nn.hpp @@ -0,0 +1,627 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
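+//
+// NN<N, C> below is the core container: a boost::adjacency_list whose
+// vertices are neurons (N) and whose edges are connections (C). Topology is
+// edited with add_neuron()/add_connection(), inputs and outputs are ordinary
+// vertices tagged via set_nb_inputs()/set_nb_outputs(), and step() performs
+// one synchronous update of every neuron. bench_nn.cpp in this module is a
+// small, complete example of building and stepping such a network.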
+ + +#ifndef _NN_HPP_ +#define _NN_HPP_ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "pf.hpp" +#include "af.hpp" +#include "neuron.hpp" +#include "connection.hpp" + +namespace nn +{ + // a useful boost functor + template + class bfs_pred_visitor : public boost::default_bfs_visitor + { + public: + bfs_pred_visitor(std::set& pred) : _pred(pred) {} + template + void discover_vertex(Vertex u, const Graph & g) + { + _pred.insert(u); + } + protected: + std::set& _pred; + }; + + + // main class + // N : neuron type, C : connection type + template + class NN + { + public: + // types + typedef boost::adjacency_list graph_t; + typedef typename boost::graph_traits::vertex_iterator vertex_it_t; + typedef typename boost::graph_traits::edge_iterator edge_it_t; + typedef typename boost::graph_traits::out_edge_iterator out_edge_it_t; + typedef typename boost::graph_traits::in_edge_iterator in_edge_it_t; + typedef typename boost::graph_traits::edge_descriptor edge_desc_t; + typedef typename boost::graph_traits::vertex_descriptor vertex_desc_t; + typedef typename boost::graph_traits::adjacency_iterator adj_it_t; + typedef typename std::vector vertex_list_t; + typedef N neuron_t; + typedef C conn_t; + typedef typename N::af_t af_t; + typedef typename N::pf_t pf_t; + typedef typename C::weight_t weight_t; + typedef typename C::io_t io_t; + + // constructor + NN() : _neuron_counter(0), _init_done(false) + {} + NN(const NN& o) { *this = o; } + NN& operator=(const NN& o) + { + if (&o == this) + return *this; + _g = o._g; + _neuron_counter = o._neuron_counter; + _inputs.clear(); + _outputs.clear(); + _inputs.resize(o.get_nb_inputs()); + _outputs.resize(o.get_nb_outputs()); + _init_io(); + _init_done = false; + return *this; + } + // init + void init() { _init(); } + // set id for inputs and outputs + void name_io() { _name_io(); } + // load/write + //void load(const std::string& fname) { _load_graph(fname); } + // void write(const std::string& fname) { _write_graph(fname); } + void write(std::ostream& ofs) { _write_dot(ofs); } + void dump(std::ostream& ofs) const + { + std::pair vp; + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + ofs<<_g[*vp.first]._id<<" "<<_g[*vp.first].get_next_output()<<" "; + ofs<(_neuron_counter++); + _g[v]._label = label; + return v; + } + + vertex_desc_t add_neuron(const std::string& label, + const typename pf_t::params_t& pf_params, + const typename af_t::params_t& af_params) + { + vertex_desc_t v = add_neuron(label); + _g[v].set_pfparams(pf_params); + _g[v].set_afparams(af_params); + return v; + } + + bool add_connection(const vertex_desc_t& u, + const vertex_desc_t& v, + weight_t weight) + { + std::pair e = add_edge(u, v, _g); + if (e.second) + _g[e.first].set_weight(weight); + return e.second; + } + // special version when you need to increase weight + bool add_connection_w(const vertex_desc_t& u, + const vertex_desc_t& v, + weight_t weight) + { + std::pair e = add_edge(u, v, _g); + if (e.second) + _g[e.first].set_weight(weight); + else + _g[e.first].set_weight(_g[e.first].get_weight() + weight); + return e.second; + } + + void set_all_pfparams(const std::vector& pfs) + { + assert(num_vertices(_g) == pfs.size()); + size_t k = 0; + BGL_FORALL_VERTICES_T(v, _g, graph_t) + _g[v].set_pfparamst(pfs[k++]); + } + + void set_all_afparams(const std::vector& afs) + { + assert(num_vertices(_g) == afs.size()); + size_t k = 
0; + BGL_FORALL_VERTICES_T(v, _g, graph_t) + _g[v].set_afparamst(afs[k++]); + } + + void set_all_weights(const std::vector& ws) + { +#ifndef NDEBUG + if (num_edges(_g) != ws.size()) + std::cout << "param errors: " + << num_edges(_g) + << " whereas " + << ws.size() + << " provided" <_g[v].set_in(k++); + } + } + void set_nb_outputs(unsigned i) + { + _outputs.resize(i); + size_t k = 0; + BOOST_FOREACH(vertex_desc_t& v, _outputs) + { + v = add_vertex(_g); + this->_g[v].set_out(k++); + } + } + vertex_desc_t get_input(int i) const + { + assert((size_t)i < _inputs.size()); + assert(this->_g[_inputs[i]].get_in() != -1); + return _inputs[i]; + } + const std::vector& get_inputs() const { return _inputs; } + const std::vector& get_outputs() const { return _outputs; } + // warning : O(n) + vertex_desc_t get_neuron(size_t i) const + { + i = std::min(num_vertices(_g) - 1, i); + size_t k = 0; + BGL_FORALL_VERTICES_T(v, _g, graph_t) + if (k++ == i) + return v; + assert(0); + return (vertex_desc_t)(0x0);; + } + vertex_list_t get_neuron_list() + { + vertex_list_t neuron_list; + std::pair vp; + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + { + neuron_list.push_back(vertex_desc_t(*vp.first)); + } + return neuron_list; + } + neuron_t& get_neuron_by_vertex(vertex_desc_t v) + { + return this->_g[v]; + } + io_t get_neuron_output(size_t i) const + { + return _g[get_neuron(i)].get_current_output(); + } + + std::string get_neuron_id(size_t i) const + { + return _g[get_neuron(i)]._id; + } + vertex_desc_t get_output(int i) const + { + assert((size_t) i < _outputs.size()); + assert(this->_g[_outputs[i]].get_out() != -1); + return _outputs[i]; + } + const N& get_output_neuron(int i) const + { + return _g[_outputs[i]]; + } + bool is_output(const vertex_desc_t& v) const + { + return std::find(_outputs.begin(), _outputs.end(), v) != _outputs.end(); + } + bool is_input(const vertex_desc_t& v) const + { + return std::find(_inputs.begin(), _inputs.end(), v) != _inputs.end(); + } + + // step + void step(const std::vector& inputs) { _step(inputs); } + + // accessors + const std::vector& get_outf() const { return _outf; } + io_t get_outf(unsigned i) const { return _outf[i]; } + const std::vector& outf() const { return get_outf(); } + io_t outf(unsigned i) const { return get_outf(i); } + unsigned get_nb_inputs() const { return _inputs.size(); } + unsigned get_nb_outputs() const { return _outputs.size(); } + unsigned get_nb_connections() const { return num_edges(_g); } + unsigned get_nb_neurons() const { return num_vertices(_g); } + + // subnns + void remove_subnn(const std::set& subnn) + { + BOOST_FOREACH(vertex_desc_t v, subnn) + if (!is_input(v) && !is_output(v)) + { + clear_vertex(v, _g); + remove_vertex(v, _g); + } + _init_io(); + } + template + void add_subnn(const NN& nn, + const std::vector& inputs, + const std::vector& outputs) + { + assert(inputs.size() == nn.get_nb_inputs()); + assert(outputs.size() == nn.get_nb_outputs()); + std::map rmap; + const typename NN::graph_t& g_src = nn.get_graph(); + BGL_FORALL_VERTICES_T(v, g_src, typename NN::graph_t) + if (g_src[v].get_in() == -1 && g_src[v].get_out() == -1) + { + vertex_desc_t nv = add_vertex(_g); + _g[nv] = g_src[v]; + _g[nv]._id = boost::lexical_cast(_neuron_counter++); + rmap[v] = nv; + } + + std::vector vnodes; + // hoping that the order did not change too much + BGL_FORALL_VERTICES_T(v, _g, graph_t) + vnodes.push_back(v); + + BGL_FORALL_EDGES_T(e, g_src, typename NN::graph_t) + { + std::pair ne; + int in = g_src[source(e, g_src)].get_in(); + 
int out = g_src[target(e, g_src)].get_out(); + assert(in == -1 || in < inputs.size()); + assert(out == -1 || out < outputs.size()); + if (in != -1 && out != -1) + { + int n_in = std::min(vnodes.size() - 1, inputs[in]); + int n_out = std::min(vnodes.size() - 1, outputs[out]); + ne = add_edge(vnodes[n_in], vnodes[n_out], _g); + } + else if (in != -1) + { + int n_in = std::min(vnodes.size() - 1, inputs[in]); + ne = add_edge(vnodes[n_in], rmap[target(e, g_src)], _g); + } + else if (out != -1) + { + int n_out = std::min(vnodes.size() - 1, outputs[out]); + ne = add_edge(rmap[source(e, g_src)], vnodes[n_out], _g); + } + else + { + assert(rmap.find(source(e, g_src)) != rmap.end()); + assert(rmap.find(target(e, g_src)) != rmap.end()); + ne = add_edge(rmap[source(e, g_src)], rmap[target(e, g_src)], _g); + } + _g[ne.first] = g_src[e]; + } + + _init_io(); + } + + // remove the connection with a weigth that is smaller (in absolute value) to the threshold + // !!! WARNING + // this method will destroy your neural network... + int remove_low_weights(float threshold) + { + int nb_removed = 0; + std::vector to_remove; + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + { + if (fabs(_g[e].get_weight()) < threshold) + to_remove.push_back(e); + } + for (size_t i = 0; i < to_remove.size(); ++i) + remove_edge(to_remove[i], this->_g); + return to_remove.size(); + } + + // remove neurons that are not connected to both one input and + // one output (this is NOT callled automatically in NN + // + // WARNING: if simplify_in is true, this can change the behavior + // of neurons since neurons not connected to inputs but connected + // to outputs can output a constant value + // + // principle : keep the neurons that are successors of inputs + // and predecessors of outputs + void simplify(bool simplify_in = false) + { + // we need sets and not lists withouh io + std::set all_neurons; + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + if (!is_input(v) && !is_output(v)) + all_neurons.insert(v); + std::set out_preds, in_succs; + + // out + BOOST_FOREACH(vertex_desc_t v, this->_outputs) + { + std::set preds; + nn::bfs_pred_visitor vis(preds); + breadth_first_search(boost::make_reverse_graph(_g), + v, color_map(get(&N::_color, _g)).visitor(vis)); + out_preds.insert(preds.begin(), preds.end()); + } + // in + if (simplify_in) + BOOST_FOREACH(vertex_desc_t v, this->_inputs) + { + std::set succs; + nn::bfs_pred_visitor vis(succs); + breadth_first_search(_g, + v, color_map(get(&N::_color, _g)).visitor(vis)); + in_succs.insert(succs.begin(), succs.end()); + } + else + in_succs = all_neurons; + // make the intersection of in_succ and out_preds + std::set valid_neurons; + std::set_intersection(in_succs.begin(), in_succs.end(), + out_preds.begin(), out_preds.end(), + std::insert_iterator >(valid_neurons, + valid_neurons.begin())); + // get the list of neurons that are NOT in valid_neurons + std::set to_remove; + std::set_difference(all_neurons.begin(), all_neurons.end(), + valid_neurons.begin(), valid_neurons.end(), + std::insert_iterator >(to_remove, + to_remove.begin())); + // remove these neurons + BOOST_FOREACH(vertex_desc_t v, to_remove) + { + clear_vertex(v, _g); + remove_vertex(v, _g); + } + } + // fully connect two vectors of neurons + void full_connect(const std::vector v1, + const std::vector v2, + const weight_t& w) + { + BOOST_FOREACH(vertex_desc_t x, v1) + BOOST_FOREACH(vertex_desc_t y, v2) + this->add_connection(x, y, w); + } + + // 1 to 1 connection + void connect(const std::vector v1, + const std::vector v2, + const weight_t& 
w) + { + assert(v1.size() == v2.size()); + for (size_t i = 0; i < v1.size(); ++i) + this->add_connection(v1[i], v2[i], w); + } + protected: + // attributes + graph_t _g; + vertex_list_t _inputs; + vertex_list_t _outputs; + std::vector _outf; + int _neuron_counter; + bool _init_done; + + // methods + void _write_dot(std::ostream& ofs) + { + ofs << "digraph G {" << std::endl; + BGL_FORALL_VERTICES_T(v, this->_g, graph_t) + { + ofs << this->_g[v].get_id(); + ofs << " [label=\""<_g[v].get_id()<<"\""; + // ofs << " af"<< this->_g[v].get_afparams(); + // ofs << "| pf"<< this->_g[v].get_pfparams() <<"\""; + if (is_input(v) || is_output(v)) + ofs<<" shape=doublecircle"; + + ofs <<"]"<< std::endl; + } + BGL_FORALL_EDGES_T(e, this->_g, graph_t) + { + ofs << this->_g[source(e, this->_g)].get_id() + << " -> " << this->_g[target(e, this->_g)].get_id() + << "[label=\"" << _g[e].get_weight() << "\"]" << std::endl; + } + ofs << "}" << std::endl; + } + + void _activate(vertex_desc_t n) + { + using namespace boost; + if (_g[n].get_fixed()) return; + + in_edge_it_t in, in_end; + unsigned i = 0; + for (tie(in, in_end) = in_edges(n, _g); in != in_end; ++in, ++i) + _g[n].set_input(i, _g[source(*in, _g)].get_current_output()); + + _g[n].activate(); + } + + void _set_in(const std::vector& inf) + { + assert(inf.size() == _inputs.size()); + if (inf.size()>0) { + unsigned i = 0; + for (typename vertex_list_t::const_iterator it = _inputs.begin(); + it != _inputs.end(); ++it, ++i) + { + _g[*it].set_current_output(inf[i]); + _g[*it].set_next_output(inf[i]); + } + } + } + void _set_out() + { + unsigned i = 0; + for (typename vertex_list_t::const_iterator it = _outputs.begin(); + it != _outputs.end(); ++it, ++i) + _outf[i] = _g[*it].get_current_output(); + } + void _step(const std::vector& inf) + { + assert(_init_done); + // in + _set_in(inf); + + // activate + std::pair vp; + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + _activate(*vp.first); + + // step + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + _g[*vp.first].step(); + + // out + _set_out(); + } + + void _name_io() + { + int i=0; + BOOST_FOREACH(vertex_desc_t v, _inputs) + { + _g[v]._id = std::string("i") + boost::lexical_cast(i); + ++i; + } + i = 0; + BOOST_FOREACH(vertex_desc_t v, _outputs) { + _g[v]._id = std::string("o") + boost::lexical_cast(i); + ++i; + } + } + void _init() + { + // BOOST_MPL_ASSERT((boost::mpl::is_same)); + // BOOST_MPL_ASSERT((boost::mpl::is_same)); + _outf.clear(); + in_edge_it_t in, in_end; + std::pair vp; + int k = 0; + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + { + vertex_desc_t n = *vp.first; + _g[n].set_in_degree(in_degree(n, _g)); + _g[n].set_id(boost::lexical_cast(k++)); + unsigned i = 0; + for (tie(in, in_end) = in_edges(n, _g); in != in_end; ++in, ++i) + _g[n].set_weight(i, _g[*in].get_weight()); + } + _outf.resize(_outputs.size()); + BOOST_FOREACH(vertex_desc_t v, _inputs) + { + _g[v].set_fixed(); + _g[v].set_current_output(N::zero()); + } + // init to 0 + for (vp = boost::vertices(_g); vp.first != vp.second; ++vp.first) + _g[*vp.first].init(); + _init_io(); + _name_io(); + _init_done = true; + } + void _init_io() + { + BGL_FORALL_VERTICES_T(v, _g, graph_t) + { + if (_g[v].get_in() != -1) + { + assert(_g[v].get_in() < (int)_inputs.size()); + _inputs[_g[v].get_in()] = v; + } + if (_g[v].get_out() != -1) + { + assert(_g[v].get_out() < (int)_outputs.size()); + _outputs[_g[v].get_out()] = v; + } + } + } + }; +} + +#endif + + + diff --git 
a/modules/dnns_easily_fooled/sferes/modules/nn2/params.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/params.hpp new file mode 100644 index 000000000..b701729ef --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/params.hpp @@ -0,0 +1,93 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef _NN_PARAMS_HPP_ +#define _NN_PARAMS_HPP_ +namespace nn +{ + namespace params + { + struct Dummy + { + friend std::ostream& operator<<(std::ostream& output, const Dummy& e); + Dummy() : _x(42) {} + void mutate() {} + void random() {} + void develop() {} + size_t size() const { return 0; } + float data(size_t i) const { return 0.0f;} + float& operator[](size_t i) { return _x; } + float operator[](size_t i) const { return _x; } + template + void serialize(A& ar, unsigned int v) {} + typedef float type_t; + protected: + float _x; + }; + + std::ostream& operator<<(std::ostream& output, const Dummy& e) { + return output; + } + template + struct Vectorf + { + typedef float type_t; + BOOST_STATIC_CONSTEXPR int s=S; + Vectorf() : _data(S) {} + // magic cast ! 
+ template + Vectorf(const T& v) : + _data(S) + { + assert(v.size() == S); + for (size_t i = 0; i < v.size(); ++i) + _data[i] = v.data(i); + } + float data(size_t i) const { assert(i < S) ; return _data[i]; } + float& operator[](size_t i) { return _data[i]; } + float operator[](size_t i) const { return _data[i]; } + + size_t size() const { return _data.size(); } + void mutate() {} + void random() {} + void develop() {} + protected: + std::vector _data; + }; + } + + + +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/pf.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/pf.hpp new file mode 100644 index 000000000..28efce710 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/pf.hpp @@ -0,0 +1,168 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef NN_PF_HPP +#define NN_PF_HPP + +#include +#include + +#include "params.hpp" +#include "trait.hpp" + +// potential functions (weighted sum, leaky integrator, etc.) 
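pf.hpp below implements a weighted-sum potential (PfWSum) and Ijspeert-style coupled oscillators (PfIjspeert). Its header comment also mentions leaky integrators, which this diff does not include; purely as an illustration of what such a potential functor could look like under the same operator() contract (a sketch with an assumed time constant, not library code):

    // Sketch only (not part of this diff): a leaky-integrator potential in the
    // spirit of the Pf interface, keeping an internal state that decays over time.
    #include <vector>

    struct PfLeaky {
      float tau = 10.f;                  // assumed time constant
      float state = 0.f;
      std::vector<float> weights;
      float operator()(const std::vector<float>& inputs) {
        float drive = 0.f;
        for (size_t i = 0; i < inputs.size(); ++i)
          drive += weights[i] * inputs[i];
        state += (-state + drive) / tau; // Euler step of tau * ds/dt = -s + drive
        return state;
      }
    };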
+namespace nn +{ + template + class Pf + { + public: + typedef P params_t; + typedef W weight_t; + const params_t& get_params() const { return _params; } + params_t& get_params() { return _params; } + void set_params(const params_t& params) { _params = params; } + void init() {} + // weights are stored in the pf function for efficiency reasons + void set_nb_weights(size_t n) { _weights.resize(n); } + void set_weight(size_t i, const W& w) { assert(i < _weights.size()); _weights[i] = w; } + const std::valarray& get_weights() const { return _weights; } + // main function + template + float operator() (const typename trait::vector_t & inputs) const { return 0.0f; } + protected: + params_t _params; + // cache weights + std::valarray _weights; + }; + + template + struct PfWSum : public Pf + { + typedef params::Dummy params_t; + typedef W weight_t; + void init() + { + _w_cache.resize(this->_weights.size()); + for (size_t i = 0; i < this->_weights.size(); ++i) + _w_cache[i] = trait::single_value(this->_weights[i]); + } + float operator() (const trait::vector_t & inputs) const + { + assert(inputs.size() == _w_cache.size()); + //std::cout<<"in:"<"<< + //_w_cache.dot(inputs)< no vectorization of pwfsum" + return (_w_cache * inputs).sum(); +#endif + } + protected: + trait::vector_t _w_cache; + }; + + + + // Ijsspert's coupled non-linear oscillators + // see : Learning to Move in Modular Robots using Central Pattern + // Generators and Online Optimization, 2008 + // Main parameters: + // - phi_i: phase lag + // - r_i: amplitude + // - x_i: offset + // - _omega: frequency + template + struct PfIjspeert : public Pf, P> + { + typedef std::pair weight_t; + typedef P params_t; + BOOST_STATIC_CONSTEXPR float dt = 0.01; + BOOST_STATIC_CONSTEXPR float a_r = 20.0f; + BOOST_STATIC_CONSTEXPR float a_x = 20.0f; + void set_r(float r) { _r = r; } + void set_x(float x) { _x = x; } + void set_omega(float o) { _omega = o; } + float get_theta_i() const { return _theta_i; } + void init() + { + _phi_i = 0; //sferes::misc::rand(); + _r_i = 0; //sferes::misc::rand(); + _x_i = 0; //sferes::misc::rand(); + _theta_i = 0; + _phi_i_d = 0; _r_i_d = 0; _x_i_d = 0; + } + // depends on r_j and phi_j + weight_t operator() (const trait::vector_t & inputs) + { + _phi_i_d = _omega; + for (size_t j = 0; j < inputs.size(); ++j) + { + float r_j = inputs[j].first; + float phi_j = inputs[j].second; + float w_ij = this->_weights[j].first; + float phi_ij = this->_weights[j].second; + std::cout << "phi_ij:" << phi_ij << " " << inputs.size() << std::endl; + _phi_i_d += w_ij * r_j * sin(phi_j - phi_ij - _phi_i); + } + float r_i_dd = a_r * (a_r / 4 * (_r - _r_i) - _r_i_d); + float x_i_dd = a_r * (a_r / 4 * (_x - _x_i) - _x_i_d); + + // integrate + _r_i_d += r_i_dd * dt; + _r_i += _r_i_d * dt; + _x_i_d += x_i_dd * dt; + _x_i += _x_i_d * dt; + _phi_i += _phi_i_d * dt; + + // result + _theta_i = _x_i + _r_i *cos(_phi_i); + return std::make_pair(_r_i, _phi_i); + } + private: + // states + float _phi_i, _r_i, _theta_i, _x_i; + // states dot + float _phi_i_d, _r_i_d, _x_i_d; + // parameters + float _r, _x, _omega; + }; + +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/phen_dnn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/phen_dnn.hpp new file mode 100644 index 000000000..612d646d7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/phen_dnn.hpp @@ -0,0 +1,79 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef PHEN_DNN_HPP +#define PHEN_DNN_HPP + +#include +#include +#include "gen_dnn.hpp" + +namespace sferes +{ + namespace phen + { + SFERES_INDIV(Dnn, Indiv) + { + public: + void develop() + { + // develop the parameters + BGL_FORALL_VERTICES_T(v, this->gen().get_graph(), + typename nn_t::graph_t) + { + this->gen().get_graph()[v].get_afparams().develop(); + this->gen().get_graph()[v].get_pfparams().develop(); + } + BGL_FORALL_EDGES_T(e, this->gen().get_graph(), + typename nn_t::graph_t) + { + this->gen().get_graph()[e].get_weight().develop(); + } + // init everything + this->_gen.init(); + } + void show(std::ostream& os) { this->gen().write(os); } + typedef typename Gen::nn_t nn_t; + nn_t& nn() { return this->gen(); } + const nn_t& nn() const { return this->gen(); } + protected: + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/phen_hyper_nn.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/phen_hyper_nn.hpp new file mode 100644 index 000000000..78c0908ec --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/phen_hyper_nn.hpp @@ -0,0 +1,308 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. 
You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef PHEN_HYPER_NN_HPP +#define PHEN_HYPER_NN_HPP + +#include +#include +#include + +#include +#include "gen_hyper_nn.hpp" + + +namespace sferes +{ + namespace phen + { + namespace hnn + { + class Pos + { + public: + Pos() { + } + Pos(float x, float y, float z) : _x(x), _y(y), _z(z) { + } + float dist(const Pos& p) const + { + float x = _x - p._x; + float y = _y - p._y; + float z = _z - p._z; + return sqrt(x * x + y * y + z * z); + } + float x() const { + return _x; + } + float y() const { + return _y; + } + float z() const { + return _z; + } + + template + void serialize(Archive& ar, const unsigned int version) + { + ar& BOOST_SERIALIZATION_NVP(_x); + ar& BOOST_SERIALIZATION_NVP(_y); + ar& BOOST_SERIALIZATION_NVP(_z); + } + bool operator == (const Pos &p) + { return _x == p._x && _y == p._y && _z == p._z; } + protected: + float _x, _y, _z; + }; + } + + // hyperneat-inspired phenotype, based on a cppn + SFERES_INDIV(HyperNn, Indiv) + { + public: + typedef Gen gen_t; + typedef typename Params::hyper_nn::neuron_t neuron_t; + typedef typename Params::hyper_nn::connection_t connection_t; + typedef typename nn::NN nn_t; + typedef typename nn_t::vertex_desc_t v_d_t; + typedef typename gen_t::nn_t gen_nn_t; + SFERES_CONST size_t nb_pfparams = Params::hyper_nn::nb_pfparams; + SFERES_CONST size_t nb_afparams = Params::hyper_nn::nb_afparams; + SFERES_CONST size_t nb_cppn_inputs = 2 + 2; + SFERES_CONST size_t nb_cppn_outputs = 2; + + void develop() + { + this->_nn = nn_t(); + this->gen().init(); + // develop the parameters + BGL_FORALL_VERTICES_T(v, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + { + this->gen().get_graph()[v].get_afparams().develop(); + this->gen().get_graph()[v].get_pfparams().develop(); + } + BGL_FORALL_EDGES_T(e, this->gen().get_graph(), + typename gen_t::nn_t::graph_t) + this->gen().get_graph()[e].get_weight().develop(); + assert(nb_cppn_inputs == this->gen().get_nb_inputs()); + assert(nb_cppn_outputs == this->gen().get_nb_outputs()); + + _all_neurons.clear(); + + size_t d = this->gen().get_depth(); + // create the nn + _nn.set_nb_inputs(Params::hyper_nn::nb_inputs); + 
_nn.set_nb_outputs(Params::hyper_nn::nb_outputs); + SFERES_CONST size_t skip = + Params::hyper_nn::nb_inputs + + Params::hyper_nn::nb_outputs; + SFERES_CONST size_t skip_total = + Params::hyper_nn::nb_inputs + + Params::hyper_nn::nb_outputs + + Params::hyper_nn::nb_hidden; + + BGL_FORALL_VERTICES_T(v, _nn.get_graph(), typename nn_t::graph_t) + /**/ _all_neurons.push_back(v); + + // hidden neurons + for (size_t i = 0; i < Params::hyper_nn::nb_hidden; ++i) + { + v_d_t v = _nn.add_neuron(boost::lexical_cast(i)); + _all_neurons.push_back(v); + } + + assert(_all_neurons.size() == + Params::hyper_nn::substrate_size() / 2 + - Params::hyper_nn::nb_pfparams + - Params::hyper_nn::nb_afparams); + + // build the coordinate map + for (size_t i = 0; i < _all_neurons.size() * 2; i += 2) + this->_coords_map[_all_neurons[i / 2]] = + hnn::Pos(Params::hyper_nn::substrate(i), + Params::hyper_nn::substrate(i + 1), 0); + + // afparams and pfparams + for (size_t i = 0; i < _all_neurons.size(); ++i) + { + typename neuron_t::pf_t::params_t pfparams; + // we put pfparams & afparams in [0:1] + //for (size_t k = 0; k < nb_pfparams; ++k) + //{ + // pfparams[k] = cppn_value(skip_total + k, i, false, 1) / 2.0f + 0.5f; + // std::cout << " " << pfparams[k] << " "; + //} + typename neuron_t::af_t::params_t + afparams; + for (size_t k = 0; k < nb_afparams; ++k) + { + float b = cppn_value(skip_total + k + nb_pfparams, i, + k + 1, false) / 2.0f + 0.5f; + size_t bi = b * Params::hyper_nn::bias_size(); + bi = std::min(bi, Params::hyper_nn::bias_size() - 1); + afparams[k] = Params::hyper_nn::bias(bi); + } + _nn.get_graph()[_all_neurons[i]]. + set_pfparams(pfparams); + _nn.get_graph()[_all_neurons[i]]. + set_afparams(afparams); + } + // create connections + for (size_t i = 0; i < skip_total * 2; i += 2) + for (size_t j = 0; j < skip_total * 2; j += 2) + if (!_nn.is_input(_all_neurons[j / 2])) + { + float w = cppn_value(i, j, 0, true); + float ws = w >= 0 ? 
1 : -1; + ws *= + (fabs(w) - + Params::hyper_nn::conn_threshold) / (1 - Params::hyper_nn::conn_threshold); + // TODO generalize this + // ws is in [-1, 1] TODO : no guarantee that ws is in [-1;1] + size_t wi = (int) ((ws / 2.0 + 0.5) * Params::hyper_nn::weights_size()); + wi = std::min(wi, Params::hyper_nn::weights_size() - 1); + float wf = Params::hyper_nn::weights(wi); + typename connection_t::weight_t weight = typename connection_t::weight_t(wf); + if (fabs(w) > + Params::hyper_nn::conn_threshold) + _nn.add_connection(_all_neurons[i / 2], + _all_neurons[j / 2], + weight); + } + this->_nn.init(); + } + + float cppn_value(size_t i, size_t j, + size_t n, bool ff = false) + { + assert(i < Params::hyper_nn::substrate_size()); + assert(j < Params::hyper_nn::substrate_size()); + assert(i + 1 < Params::hyper_nn::substrate_size()); + assert(j + 1 < Params::hyper_nn::substrate_size()); + assert(n < nb_cppn_outputs); + std::vector in(nb_cppn_inputs); + this->gen().init(); + in[0] = Params::hyper_nn::substrate(i); + in[1] = Params::hyper_nn::substrate(i + 1); + in[2] = Params::hyper_nn::substrate(j); + in[3] = Params::hyper_nn::substrate(j + 1); + if (in[1] == in[3]) + return 0; + if (ff && (in[1] > in[3] || fabs(in[1] - in[3]) > Params::hyper_nn::max_y)) + return 0; + for (size_t k = 0; k < this->gen().get_depth(); ++k) + this->gen().step(in); + return this->gen().get_outf(n); + } + + void write_svg(std::ostream& ofs) + { + //_nn.write(os); + //std::ofstream ofs("/tmp/nn.svg"); + ofs << ""; + for (size_t i = 0; i < _all_neurons.size() * 2; i += 2) + { + float x = Params::hyper_nn::substrate(i); + float y = Params::hyper_nn::substrate(i + 1); + ofs << "" << std::endl; + } + typedef typename nn_t::graph_t graph_t; + typedef typename nn_t::vertex_desc_t v_d_t; + const graph_t& g = this->nn().get_graph(); + + BGL_FORALL_EDGES_T(e, g, graph_t) + { + v_d_t src = boost::source(e, g); + v_d_t tgt = boost::target(e, g); + float x1 = _coords_map[src].x() * 80 + 100; + float y1 = _coords_map[src].y() * 80 + 100; + float x2 = _coords_map[tgt].x() * 80 + 100; + float y2 = _coords_map[tgt].y() * 80 + 100; + double weight = g[e].get_weight(); + ofs << " 0 ? "0,255,0" : "255,0,0") + << ");stroke-width:" << fabs(weight) + << "\"/>" + << std::endl; + } + + ofs << ""; + } + nn_t& nn() { + return _nn; + } + const nn_t& nn() const { + return _nn; + } + const std::vector& + all_neurons() const { + return _all_neurons; + } + float compute_length(float min_length) + { + float length = 0; + BGL_FORALL_EDGES_T(e, _nn.get_graph(), typename nn_t::graph_t) + { + typename nn_t::vertex_desc_t src = boost::source(e, _nn.get_graph()); + typename nn_t::vertex_desc_t tgt = boost::target(e, _nn.get_graph()); + double weight = _nn.get_graph()[e].get_weight(); + float l = _coords_map[src].dist(_coords_map[tgt]); + length += l > min_length ? l : 0; + } + return length; + } + + protected: + nn_t _nn; + std::vector _all_neurons; + std::map _coords_map; + }; + } +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn.cpp new file mode 100644 index 000000000..0ccdd4cb7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn.cpp @@ -0,0 +1,219 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
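The phen_hyper_nn.hpp file added above builds the substrate network by querying the CPPN for every pair of neuron coordinates: the first CPPN output decides whether a connection exists (its magnitude must clear conn_threshold) and, if so, the value is quantised onto the discrete weights array. A stripped-down sketch of that keep-or-drop and quantisation step, with the CPPN replaced by a placeholder (assumed names, not the library API):

    // Sketch only: threshold-and-quantise logic in the spirit of develop() in
    // phen_hyper_nn.hpp; quantise() is a made-up helper for illustration.
    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    float quantise(float w, float threshold, const std::vector<float>& weights) {
      // rescale the part of |w| above the threshold back to roughly [-1, 1]
      float ws = (w >= 0 ? 1.f : -1.f) * (std::fabs(w) - threshold) / (1.f - threshold);
      std::size_t wi = static_cast<std::size_t>((ws / 2.f + 0.5f) * weights.size());
      return weights[std::min(wi, weights.size() - 1)];
    }

    // usage: only create the connection when the raw CPPN output clears the threshold
    // if (std::fabs(w) > threshold)
    //   add_connection(i, j, quantise(w, threshold, weights));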
+ +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE dnn + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include "gen_dnn.hpp" +#include "phen_dnn.hpp" + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +template +void check_list_equal(const T1& v1, const T2& v2) +{ + BOOST_CHECK_EQUAL(v1.size(), v2.size()); + typename T1::const_iterator it1 = v1.begin(); + typename T1::const_iterator it2 = v2.begin(); + for (; it1 != v1.end(); ++it1, ++it2) + BOOST_CHECK(fabs(*it1 - *it2) < 1e-3); +} + +template +void check_nn_equal(NN& nn1, NN& nn2) +{ + nn1.init(); + nn2.init(); + + BOOST_CHECK_EQUAL(nn1.get_nb_inputs(), nn2.get_nb_inputs()); + BOOST_CHECK_EQUAL(nn1.get_nb_outputs(), nn2.get_nb_outputs()); + BOOST_CHECK_EQUAL(nn1.get_nb_neurons(), nn2.get_nb_neurons()); + BOOST_CHECK_EQUAL(nn1.get_nb_connections(), nn2.get_nb_connections()); +// nn1.write("/tmp/tmp1.dot"); +// nn2.write("/tmp/tmp2.dot"); +// std::ifstream ifs1("/tmp/tmp1.dot"), ifs2("/tmp/tmp2.dot"); +// while(!ifs1.eof() && !ifs2.eof()) +// { +// //if (ifs1.get() != ifs2.get()) exit(1); +// BOOST_CHECK_EQUAL((char)ifs1.get(), (char)ifs2.get()); +// } + + std::pair vp1 = + boost::vertices(nn1.get_graph()); + std::pair vp2 = + boost::vertices(nn2.get_graph()); + while (vp1.first != vp1.second) + { + BOOST_CHECK_EQUAL(nn1.get_graph()[*vp1.first].get_in_degree(), + nn2.get_graph()[*vp2.first].get_in_degree()); + check_list_equal(nn1.get_graph()[*vp1.first].get_afparams(), + nn2.get_graph()[*vp1.first].get_afparams()); + check_list_equal(nn1.get_graph()[*vp1.first].get_pfparams(), + nn2.get_graph()[*vp1.first].get_pfparams()); + ++vp1.first; + ++vp2.first; + } + +} + + +struct Params +{ + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -5.0f; + // minimum value + SFERES_CONST float max = 5.0f; + }; + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 1; + SFERES_CONST size_t min_nb_neurons = 4; + SFERES_CONST size_t max_nb_neurons = 5; + SFERES_CONST size_t min_nb_conns = 100; + SFERES_CONST size_t max_nb_conns = 101; + SFERES_CONST float max_weight = 2.0f; + SFERES_CONST float max_bias = 2.0f; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 1.0f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 1.0f; + + SFERES_CONST int io_param_evolving = true; + SFERES_CONST init_t init = random_topology; + }; +}; + +BOOST_AUTO_TEST_CASE(direct_gen) +{ + using namespace nn; + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef phen::Parameters, fit::FitDummy<>, Params> bias_t; + typedef PfWSum pf_t; + typedef AfTanh af_t; + + sferes::gen::Dnn, Connection, Params> gen1, gen2, gen3, gen4; + + gen1.random(); + gen2.random(); + + gen1.cross(gen2, gen3, gen4); + gen3.mutate(); + gen4.mutate(); + gen2.mutate(); +} + + + +BOOST_AUTO_TEST_CASE(direct_nn_serialize) +{ + srand(0); + + using namespace nn; + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef phen::Parameters, fit::FitDummy<>, 
Params> bias_t; + typedef PfWSum pf_t; + typedef AfTanh af_t; + typedef sferes::gen::Dnn, Connection, Params> gen_t; + typedef phen::Dnn, Params> phen_t; + + + typedef boost::archive::binary_oarchive oa_t; + typedef boost::archive::binary_iarchive ia_t; + + for (size_t i = 0; i < 10; ++i) + { + phen_t indiv[3]; + indiv[0].random(); + indiv[0].mutate(); + indiv[0].mutate(); + indiv[0].mutate(); + indiv[0].nn().init(); + { + std::ofstream ofs("/tmp/serialize_nn1.bin", std::ios::binary); + oa_t oa(ofs); + oa & indiv[0]; + } + { + std::ifstream ifs("/tmp/serialize_nn1.bin", std::ios::binary); + ia_t ia(ifs); + ia & indiv[1]; + } + indiv[2].nn() = indiv[0].nn(); + using namespace boost::assign; + std::vector in = list_of(0.5f)(1.0f)(-0.25f)(1.101f); + for (size_t j = 0; j < 3; ++j) + indiv[j].nn().init(); + for (size_t i = 0; i < 10; ++i) + for (size_t j = 0; j < 3; ++j) + indiv[j].nn().step(in); + + for (size_t j = 1; j < 3; ++j) + BOOST_CHECK_CLOSE(indiv[0].nn().get_outf(0), indiv[j].nn().get_outf(0), 1e-5); + } +} + diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn_ff.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn_ff.cpp new file mode 100644 index 000000000..1c253fe82 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_dnn_ff.cpp @@ -0,0 +1,145 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
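The direct_nn_serialize test above checks that an evolved network survives a Boost.Serialization round trip: the individual is written to a binary archive, read back into a fresh object, and both copies are stepped on the same inputs and compared with BOOST_CHECK_CLOSE. The same round-trip pattern in isolation, shown on a trivial serialisable struct rather than the test's actual phenotype (sketch only):

    // Sketch only: the save/load round trip used by direct_nn_serialize,
    // demonstrated on a minimal struct with a serialize() member.
    #include <boost/archive/binary_iarchive.hpp>
    #include <boost/archive/binary_oarchive.hpp>
    #include <cassert>
    #include <fstream>

    struct Toy {
      float w = 0.25f;
      template <class Archive>
      void serialize(Archive& ar, unsigned) { ar & w; }
    };

    int main() {
      Toy a, b;
      { std::ofstream ofs("/tmp/toy.bin", std::ios::binary);
        boost::archive::binary_oarchive oa(ofs); oa & a; }   // save
      { std::ifstream ifs("/tmp/toy.bin", std::ios::binary);
        boost::archive::binary_iarchive ia(ifs); ia & b; }   // reload
      assert(a.w == b.w);              // behaviour must match after the round trip
    }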
+ +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE dnn_ff + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "gen_dnn_ff.hpp" +#include "phen_dnn.hpp" + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; +struct Params +{ + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + // maximum value of parameters + SFERES_CONST float min = -5.0f; + // minimum value + SFERES_CONST float max = 5.0f; + }; + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 2; + SFERES_CONST size_t min_nb_neurons = 4; + SFERES_CONST size_t max_nb_neurons = 5; + SFERES_CONST size_t min_nb_conns = 100; + SFERES_CONST size_t max_nb_conns = 101; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 0.1f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 1.0f; + + SFERES_CONST int io_param_evolving = true; + SFERES_CONST init_t init = ff; + }; +}; + + + +struct cycle_detector : public boost::dfs_visitor<> +{ + cycle_detector(bool& has_cycle) + : m_has_cycle(has_cycle) { } + + template + void back_edge(Edge, Graph&) { m_has_cycle = true; } +protected: + bool& m_has_cycle; +}; + +BOOST_AUTO_TEST_CASE(direct_nn_ff) +{ + srand(time(0)); + typedef phen::Parameters, fit::FitDummy<>, Params> weight_t; + typedef phen::Parameters, fit::FitDummy<>, Params> bias_t; + typedef nn::PfWSum pf_t; + typedef nn::AfTanh af_t; + typedef nn::Neuron neuron_t; + typedef nn::Connection connection_t; + typedef gen::DnnFF gen_t; + typedef phen::Dnn, Params> phen_t; + phen_t i; + i.random(); + i.develop(); + std::vector in(4); + std::fill(in.begin(), in.end(), 0); + i.nn().step(in); + BOOST_CHECK_EQUAL(i.nn().get_nb_inputs(), 4); + BOOST_CHECK_EQUAL(i.nn().get_nb_outputs(), 2); + BOOST_CHECK_EQUAL(i.nn().get_nb_neurons(), 6); + BOOST_CHECK_EQUAL(i.nn().get_nb_connections(), 8); + std::ofstream ofs("/tmp/nn.dot"); + i.nn().write(ofs); + for (size_t k = 0; k < 40; ++k) + i.mutate(); + std::ofstream ofs2("/tmp/nn2.dot"); + i.nn().write(ofs2); + bool has_cycle = false; + cycle_detector vis(has_cycle); + boost::depth_first_search(i.nn().get_graph(), + boost::color_map(get(&phen_t::nn_t::neuron_t::_color, + i.nn().get_graph())).visitor(vis)); + BOOST_CHECK(!has_cycle); + +} + diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn.cpp new file mode 100644 index 000000000..fde5464a5 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn.cpp @@ -0,0 +1,240 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. 
You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE dnn + +#include +#include +#include +#include + +#include "gen_dnn.hpp" +#include "gen_hyper_nn.hpp" +#include "phen_hyper_nn.hpp" + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +struct Params1 +{ + struct dnn + { + SFERES_CONST size_t nb_inputs = 3; + SFERES_CONST size_t nb_outputs = 1; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST init_t init = ff; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + SFERES_CONST float min = -2.0f; + SFERES_CONST float max = 2.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, 0, 1, 2); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; +}; + + +struct Params2 +{ + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 1; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST float weight_sigma = 0.5f; + SFERES_CONST float vect_sigma = 0.5f; + SFERES_CONST float m_rate_weight = 1.0f; + SFERES_CONST float m_rate_fparams = 1.0f; + SFERES_CONST init_t init = ff; + }; + struct evo_float + { + 
SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + SFERES_CONST float min = -2.0f; + SFERES_CONST float max = 2.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, 0, 1, 2); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; + struct hyper_nn + { + SFERES_ARRAY(float, substrate, + 0.2f, 0.2f, // in 1 + 0.2f, 0.8f, // in 2 + 0.5f, 0.5f, // out 1 + 0.8f, 0.8f, // hidden 1 + 0.8f, 0.2f, // hidden 2 + 0.2f, 0.5f, // hidden 3 + 0.5f, 0.2f // hidden 4 + ); + SFERES_ARRAY(float, weights, -1, 0, 1); + SFERES_ARRAY(float, bias, -1, 0, 1); + SFERES_CONST size_t nb_inputs = 2; + SFERES_CONST size_t nb_outputs = 1; + SFERES_CONST size_t nb_hidden = 4; + SFERES_CONST size_t nb_pfparams = 0; + SFERES_CONST size_t nb_afparams = 1; + SFERES_CONST float conn_threshold = 0.2f; + SFERES_CONST float max_y = 10.0f; + typedef nn::Neuron, + nn::AfTanh > > neuron_t; + typedef nn::Connection<> connection_t; + }; +}; + + +BOOST_AUTO_TEST_CASE(gen_cppn) +{ + srand(time(0)); + typedef phen::Parameters, fit::FitDummy<>, Params1> weight_t; + typedef gen::HyperNn cppn_t; + + cppn_t gen1, gen2, gen3, gen4; + + gen1.random(); + for (size_t i = 0; i < 20; ++i) + gen1.mutate(); + gen1.init(); + BOOST_CHECK(gen1.get_depth() >= 1); + std::ofstream ofs2("/tmp/nn.dot"); + gen1.write(ofs2); + + // generate a picture + char*pic = new char[256 * 256]; + std::vector in(3); + in[0] = 1; + for (size_t i = 0; i < 256; ++i) + for (size_t j = 0; j < 256; ++j) + { + in[1] = i / 128.0f - 1.0; + in[2] = j / 128.0f - 1.0; + for (size_t k = 0; k < gen1.get_depth(); ++k) + gen1.step(in); + pic[256 * i + j] = (int)(gen1.get_outf(0) * 256 + 255); + } + std::ofstream ofs("/tmp/pic.pgm"); + ofs << "P5" << std::endl; + ofs << "256 256" << std::endl; + ofs << "255" << std::endl; + ofs.write(pic, 256 * 256); +} + + +BOOST_AUTO_TEST_CASE(phen_hyper_nn) +{ + srand(time(0)); + typedef fit::FitDummy<> fit_t; + typedef phen::Parameters, fit::FitDummy<>, Params2> weight_t; + typedef gen::HyperNn gen_t; + typedef phen::HyperNn phen_t; + + phen_t indiv; + indiv.random(); + for (size_t i = 0; i < 5; ++i) + indiv.mutate(); + std::ofstream ofs("/tmp/nn_substrate.svg"); + indiv.develop(); + indiv.show(ofs); + // BOOST_CHECK_EQUAL(indiv.nn().get_nb_neurons(), 7); + +} + diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn_anh.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn_anh.cpp new file mode 100644 index 000000000..a1d2cf83d --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_hyper_nn_anh.cpp @@ -0,0 +1,263 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. 
+//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE dnn + +#include +#include +#include +#include + +#include "gen_dnn.hpp" +#include "gen_hyper_nn.hpp" +#include "phen_hyper_nn.hpp" + +#include +#include + +using namespace sferes; +using namespace sferes::gen::dnn; +using namespace sferes::gen::evo_float; + +struct Params1 +{ + struct dnn + { + SFERES_CONST size_t nb_inputs = 3; + SFERES_CONST size_t nb_outputs = 1; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST init_t init = ff; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + SFERES_CONST float min = -2.0f; + SFERES_CONST float max = 2.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, 0, 1, 2); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; +}; + + +struct Params2 +{ + struct dnn + { + SFERES_CONST size_t nb_inputs = 4; + SFERES_CONST size_t nb_outputs = 1; + + SFERES_CONST float m_rate_add_conn = 1.0f; + SFERES_CONST float m_rate_del_conn = 0.3f; + SFERES_CONST float m_rate_change_conn = 1.0f; + SFERES_CONST float m_rate_add_neuron = 1.0f; + SFERES_CONST float m_rate_del_neuron = 0.2f; + + SFERES_CONST float weight_sigma = 0.5f; + SFERES_CONST float vect_sigma 
= 0.5f; + SFERES_CONST float m_rate_weight = 1.0f; + SFERES_CONST float m_rate_fparams = 1.0f; + SFERES_CONST init_t init = ff; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + struct parameters + { + SFERES_CONST float min = -2.0f; + SFERES_CONST float max = 2.0f; + }; + + struct cppn + { + // params of the CPPN + struct sampled + { + SFERES_ARRAY(float, values, 0, 1, 2); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; + struct evo_float + { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; + }; + struct hyper_nn + { + SFERES_ARRAY(float, substrate, + 0.2f, 0.2f, // in 1 + 0.2f, 0.8f, // in 2 + 0.5f, 0.5f, // out 1 + 0.8f, 0.8f, // hidden 1 + 0.8f, 0.2f, // hidden 2 + 0.2f, 0.5f, // hidden 3 + 0.5f, 0.2f // hidden 4 + ); + SFERES_ARRAY(float, weights, -1, 0, 1); + SFERES_ARRAY(float, bias, -1, 0, 1); + SFERES_CONST size_t nb_inputs = 2; + SFERES_CONST size_t nb_outputs = 1; + SFERES_CONST size_t nb_hidden = 4; + SFERES_CONST size_t nb_pfparams = 0; + SFERES_CONST size_t nb_afparams = 1; + SFERES_CONST float conn_threshold = 0.2f; + SFERES_CONST float max_y = 10.0f; + typedef nn::Neuron, + nn::AfTanh > > neuron_t; + typedef nn::Connection<> connection_t; + }; +}; + + +BOOST_AUTO_TEST_CASE(gen_cppn) +{ + int nb_images = 0; + + for (; nb_images < 10; ++nb_images) + { + +// time_t seconds = time (nb_images); + srand (nb_images); + + std::string ts = boost::lexical_cast (nb_images); + + typedef phen::Parameters, fit::FitDummy<>, + Params1> weight_t; + typedef gen::HyperNn cppn_t; + + cppn_t gen1, gen2, gen3, gen4; + + gen1.random (); +// for (size_t i = 0; i < 20; ++i) +// gen1.mutate (); + gen1.init (); + BOOST_CHECK(gen1.get_depth () >= 1); +// std::ofstream ofs2 ("./nn.dot"); +// gen1.write (ofs2); + + // generate a picture + char*pic = new char[256 * 256]; + std::vector in (3); + in[0] = 1; + for (size_t i = 0; i < 256; ++i) + for (size_t j = 0; j < 256; ++j) + { + in[1] = i / 128.0f - 1.0; + in[2] = j / 128.0f - 1.0; + for (size_t k = 0; k < gen1.get_depth (); ++k) + gen1.step (in); + pic[256 * i + j] = (int) (gen1.get_outf (0) * 256 + 255); + } + + std::vector < std::string > list; + list.push_back ("/home/anh/workspace/sferes/tmp/"); + list.push_back ("image_"); + list.push_back (ts); + list.push_back (".pgm"); + std::string joined = boost::algorithm::join (list, ""); + + std::ofstream ofs (joined.c_str ()); + ofs << "P5" << std::endl; + ofs << "256 256" << std::endl; + ofs << "255" << std::endl; + ofs.write (pic, 256 * 256); + + } +} + +/* +BOOST_AUTO_TEST_CASE(phen_hyper_nn) +{ + srand(time(0)); + typedef fit::FitDummy<> fit_t; + typedef phen::Parameters, fit::FitDummy<>, Params2> weight_t; + typedef gen::HyperNn gen_t; + typedef phen::HyperNn phen_t; + + phen_t indiv; + indiv.random(); + for (size_t i = 0; i < 5; ++i) + indiv.mutate(); + std::ofstream ofs("/tmp/nn_substrate.svg"); + indiv.develop(); + indiv.show(ofs); + // BOOST_CHECK_EQUAL(indiv.nn().get_nb_neurons(), 7); + +} +*/ diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_mlp.cpp 
b/modules/dnns_easily_fooled/sferes/modules/nn2/test_mlp.cpp new file mode 100644 index 000000000..c269825fc --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_mlp.cpp @@ -0,0 +1,62 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE nn_mlp + + +#include + +#include +#include +#include +#include "mlp.hpp" + +BOOST_AUTO_TEST_CASE(nn_elman) +{ + using namespace nn; + Mlp, AfSigmoidNoBias<> >, Connection<> > nn(2, 3, 2); + nn.init(); + BOOST_CHECK_EQUAL(nn.get_nb_neurons(), 3 + 3 + 2); + BOOST_CHECK_EQUAL(nn.get_nb_connections(), 9 + 6 + 2); + std::vector in(2); + in[0] = 1.0f; + in[1] = 0.2f; + for (unsigned i = 0; i < 1000; ++i) + nn.step(in); + float out1 = nn.get_outf(0); + float out2 = nn.get_outf(1); + +} diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_nn.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_nn.cpp new file mode 100644 index 000000000..8bd593bbb --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_nn.cpp @@ -0,0 +1,138 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE nn + + +#include +#include +#include +#include +#include +#include "nn.hpp" + +BOOST_AUTO_TEST_CASE(nn_basic) +{ + using namespace nn; + + NN, AfTanh >, Connection<> > nn1, nn2, nn3; + + nn1.set_nb_inputs(1); + nn1.set_nb_outputs(2); + nn1.full_connect(nn1.get_inputs(), nn1.get_outputs(), 1.0f); + nn1.init(); + std::vector in(1); + in[0] = 1.0f; + for (size_t i = 0; i < 200; ++i) + nn1.step(in); + + float out = nn1.get_outf(0); + BOOST_CHECK_CLOSE((double)out, tanh(5.0 * 1.0f), 1e-5); + std::ofstream ofs("/tmp/test.dot"); + nn1.write(ofs); + + // check memory usage + nn2 = nn1; + for (size_t i = 0; i < 2000; ++i) + { + nn3 = nn2; + nn2 = nn1; + nn1 = nn3; + } + BOOST_CHECK_EQUAL(nn3.get_nb_connections(), nn1.get_nb_connections()); + BOOST_CHECK_EQUAL(nn3.get_nb_neurons(), nn1.get_nb_neurons()); + BOOST_CHECK_EQUAL(nn3.get_nb_inputs(), nn1.get_nb_inputs()); + BOOST_CHECK_EQUAL(nn3.get_nb_outputs(), nn1.get_nb_outputs()); + +} + + +BOOST_AUTO_TEST_CASE(nn_remove_small_weights) +{ + using namespace nn; + NN, AfTanh >, Connection<> > nn; + + nn.set_nb_inputs(3); + nn.set_nb_outputs(3); + nn.add_connection_w(nn.get_input(0), nn.get_output(0), 0.5); + nn.add_connection_w(nn.get_input(1), nn.get_output(0), 0.25); + nn.add_connection_w(nn.get_input(2), nn.get_output(2), -0.25); + nn.add_connection_w(nn.get_input(0), nn.get_output(2), 0.05); + nn.add_connection_w(nn.get_input(2), nn.get_output(1), -0.05); + nn.init(); + BOOST_CHECK_EQUAL(nn.get_nb_connections(), 5); + int k = nn.remove_low_weights(0.1); + std::cout << k << std::endl; + BOOST_CHECK_EQUAL(nn.get_nb_connections(), 3); +} + + +BOOST_AUTO_TEST_CASE(nn_speed) +{ + using namespace nn; + + typedef NN, AfTanh >, Connection<> > nn_t; + nn_t nn; + + nn.set_nb_inputs(40000); + nn.set_nb_outputs(4); + typedef std::vector layer_t; + //std::vector layers; + // layers.push_back(nn.get_inputs()); + // for (size_t i = 0; i < 10; ++i) + // { + // layer_t layer; + // for (size_t j = 0; j < 10; ++j) + // layer.push_back(nn.add_neuron("n")); + // layers.push_back(layer); + // } + // layers.push_back(nn.get_outputs()); + // for (size_t i = 0; i < layers.size() - 1; ++i) + // nn.full_connect(layers[i], layers[i + 1], 1.0); + nn.full_connect(nn.get_inputs(), nn.get_outputs(), 0.25); + + nn.init(); + boost::timer timer; + 
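+ // Time 100 forward passes of the 40000-input network; the legacy boost::timer used here
+ // measures CPU time, so timer.elapsed() below reports the seconds spent stepping the net.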
std::vector<float> in(40000);
+ std::fill(in.begin(), in.end(), 0.10f);
+ for (size_t i = 0; i < 100; ++i)
+ nn.step(in);
+ std::cout<<"timer (100 iterations):" << timer.elapsed() << std::endl;
+ std::ofstream ofs("/tmp/test.dot");
+ nn.write(ofs);
+
+}
diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/test_osc.cpp b/modules/dnns_easily_fooled/sferes/modules/nn2/test_osc.cpp
new file mode 100644
index 000000000..d15643da5
--- /dev/null
+++ b/modules/dnns_easily_fooled/sferes/modules/nn2/test_osc.cpp
@@ -0,0 +1,93 @@
+//| This file is a part of the sferes2 framework.
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC)
+//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr
+//|
+//| This software is a computer program whose purpose is to facilitate
+//| experiments in evolutionary computation and evolutionary robotics.
+//|
+//| This software is governed by the CeCILL license under French law
+//| and abiding by the rules of distribution of free software. You
+//| can use, modify and/ or redistribute the software under the terms
+//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the
+//| following URL "http://www.cecill.info".
+//|
+//| As a counterpart to the access to the source code and rights to
+//| copy, modify and redistribute granted by the license, users are
+//| provided only with a limited warranty and the software's author,
+//| the holder of the economic rights, and the successive licensors
+//| have only limited liability.
+//|
+//| In this respect, the user's attention is drawn to the risks
+//| associated with loading, using, modifying and/or developing or
+//| reproducing the software by the user in light of its specific
+//| status of free software, that may mean that it is complicated to
+//| manipulate, and that also therefore means that it is reserved for
+//| developers and experienced professionals having in-depth computer
+//| knowledge. Users are therefore encouraged to load and test the
+//| software's suitability as regards their requirements in conditions
+//| enabling the security of their systems and/or data to be ensured
+//| and, more generally, to use and operate it in the same conditions
+//| as regards security.
+//|
+//| The fact that you are presently reading this means that you have
+//| had knowledge of the CeCILL license and that you accept its terms.
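+ // Builds a chain of five Ijspeert-style phase oscillators (PfIjspeert potential function,
+ // AfDirectT activation): neighbouring neurons are coupled in both directions with weight 5
+ // and phase biases +/-phi_ij, then the network is stepped 1000 times with no external input
+ // and each neuron's theta is printed. In Ijspeert-style CPGs the phase typically follows
+ // d(theta_i)/dt = omega_i + sum_j w_ij * r_j * sin(theta_j - theta_i - phi_ij); the exact
+ // update used here is whatever PfIjspeert implements.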
+ + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE nn_osc + + +#include + +#include +#include +#include +#include "nn.hpp" + +BOOST_AUTO_TEST_CASE(nn_osc) +{ + using namespace nn; + typedef std::pair weight_t; + typedef PfIjspeert > pf_t; + typedef AfDirectT af_t; + typedef Neuron neuron_t; + typedef Connection connection_t; + typedef NN nn_t; + typedef nn_t::vertex_desc_t vertex_desc_t; + nn_t nn; + std::vector vs; + + float omega = 0.6; + float x = 0; + float r = 0.59; + float phi_ij = 0.81; + for (size_t i = 0; i < 5; ++i) + { + vertex_desc_t v = nn.add_neuron(boost::lexical_cast(i)); + nn.get_neuron_by_vertex(v).get_pf().set_omega(omega); + nn.get_neuron_by_vertex(v).get_pf().set_x(x); + nn.get_neuron_by_vertex(v).get_pf().set_r(r); + vs.push_back(v); + } + + for (size_t i = 0; i < 4; ++i) + { + nn.add_connection(vs[i], vs[i + 1], std::make_pair(5, phi_ij)); + nn.add_connection(vs[i + 1], vs[i], std::make_pair(5, -phi_ij)); + } + + + nn.init(); + for (size_t s = 0; s < 1000; ++s) + { + std::vector in; + nn.step(in); + for (size_t i = 0; i < vs.size(); ++i) + std::cout<< i << " " + << nn.get_neuron_by_vertex(vs[i]).get_pf().get_theta_i() + << std::endl; + } + // you should have beautiful oscillations + +} diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/trait.hpp b/modules/dnns_easily_fooled/sferes/modules/nn2/trait.hpp new file mode 100644 index 000000000..e5afa56e8 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/trait.hpp @@ -0,0 +1,127 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
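+ // trait<T> adapts the different weight/parameter types used by the nn2 graphs (plain float,
+ // std::pair<float, float> such as the (weight, phase bias) pairs used by the oscillator test,
+ // params::Dummy for parameter-less neurons, and a float specialisation whose vector type is
+ // Eigen::VectorXf, with Eigen pulled in under EIGEN3_ENABLED) to one small common interface:
+ // zero(), zero(k), size() and single_value(). The stream operators at the end provide textual
+ // output/input for pairs and vectors.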
+ + + + +#ifndef _NN_TRAIT_HPP_ +#define _NN_TRAIT_HPP_ + +// for std::pair +#include +#include + +#ifdef EIGEN3_ENABLED +#include +#endif + +#include "params.hpp" + +namespace nn +{ + template + struct trait + { + static T zero() { return T(0.0f); } + // a 0 initializer for vectors + static T zero(size_t k) { return zero(); } + typedef std::valarray vector_t; + static size_t size(const T& t) { return t.size(); } + static typename T::type_t single_value(const T& t) { assert(t.size() == 1); return t.data(0); } + }; + + template<> + struct trait + { + typedef std::valarray vector_t; + static float zero() { return 0.0f; } + static float zero(size_t k) { return zero(); } + static float single_value(const params::Dummy& t) { return 0.0f; } + static size_t size(const params::Dummy&) { return 0; } + }; + + + // go with eigen with float (TODO : double) + template<> + struct trait + { + typedef Eigen::VectorXf vector_t; + static float zero() { return 0.0f; } + static vector_t zero(size_t k) { return Eigen::VectorXf::Zero(k); } + static float single_value(const float& t) { return t; } + static size_t size(const float& t) { return 1; } + }; + + template<> + struct trait > + { + typedef std::valarray > vector_t; + static std::pair zero() { return std::make_pair(0.0f, 0.0f); } + static std::pair zero(size_t k) { return zero(); } + static float single_value(const std::pair& t) { return t.first; } + static size_t size(const std::pair& t) { return 2; } + }; + + // useful but wrong place (?) + template + std::basic_ostream<_CharT, _Traits>& + operator<<(std::basic_ostream<_CharT, _Traits>& ofs, const std::pair& p) + { + return ofs< + std::basic_ostream<_CharT, _Traits>& + operator<<(std::basic_ostream<_CharT, _Traits>& ofs, const std::vector& p) + { + for (size_t i = 0; i < p.size(); ++i) + ofs< + std::istream& operator>>(std::istream& ifs, std::pair& p) + { + T1 t1; + T2 t2; + ifs >> t1; + ifs >> t2; + return std::make_pair(t1, t2); + } + +} + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/modules/nn2/wscript b/modules/dnns_easily_fooled/sferes/modules/nn2/wscript new file mode 100644 index 000000000..411a2b1ff --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/modules/nn2/wscript @@ -0,0 +1,114 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. 
+#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + + + + +import os + +def set_options(blah) : pass + +def configure(blah): pass + +def build(bld): + print ("Entering directory `" + os.getcwd() + "/modules/'") + test_nn = bld.new_task_gen('cxx', 'program') + test_nn.source = 'test_nn.cpp' + test_nn.includes = '. ../../' + test_nn.uselib_local = '' + test_nn.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK' + test_nn.target = 'test_nn' + test_nn.unit_test = 1 + + test_dnn = bld.new_task_gen('cxx', 'program') + test_dnn.source = 'test_dnn.cpp' + test_dnn.includes = '. ../../' + test_dnn.uselib_local = 'sferes2' + test_dnn.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_dnn.target = 'test_dnn' + test_dnn.unit_test = 1 + + test_mlp = bld.new_task_gen('cxx', 'program') + test_mlp.source = 'test_mlp.cpp' + test_mlp.includes = '. ../../' + test_mlp.uselib_local = 'sferes2' + test_mlp.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_mlp.target = 'test_mlp' + test_mlp.unit_test = 1 + + test_esn = bld.new_task_gen('cxx', 'program') + test_esn.source = 'test_hyper_nn.cpp' + test_esn.includes = '. ../../' + test_esn.uselib_local = 'sferes2' + test_esn.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_esn.target = 'test_hyper_nn' + test_esn.unit_test = 1 + + + test_esn = bld.new_task_gen('cxx', 'program') + test_esn.source = 'test_dnn_ff.cpp' + test_esn.includes = '. ../../' + test_esn.uselib_local = 'sferes2' + test_esn.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_esn.target = 'test_dnn_ff' + test_esn.unit_test = 1 + + + + test_osc = bld.new_task_gen('cxx', 'program') + test_osc.source = 'test_osc.cpp' + test_osc.includes = '. ../../' + test_osc.uselib_local = 'sferes2' + test_osc.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_osc.target = 'test_osc' + test_osc.unit_test = 1 + + + bench_nn = bld.new_task_gen('cxx', 'program') + bench_nn.source = 'bench_nn.cpp' + bench_nn.includes = '. ../../' + bench_nn.uselib_local = 'sferes2' + bench_nn.uselib = 'EIGEN3 BOOST_GRAPH BOOST' + bench_nn.target = 'bench_nn' + + # Added tests by Anh -------------------------------------------------------------------- + test_esn = bld.new_task_gen('cxx', 'program') + test_esn.source = 'test_hyper_nn_anh.cpp' + test_esn.includes = '. 
../../' + test_esn.uselib_local = 'sferes2' + test_esn.uselib = 'EIGEN3 BOOST BOOST_GRAPH BOOST_UNIT_TEST_FRAMEWORK BOOST_SERIALIZATION' + test_esn.target = 'test_hyper_nn_anh' + test_esn.unit_test = 1 + diff --git a/modules/dnns_easily_fooled/sferes/mpi.py b/modules/dnns_easily_fooled/sferes/mpi.py new file mode 100644 index 000000000..18ac951c2 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/mpi.py @@ -0,0 +1,43 @@ +#! /usr/bin/env python +# encoding: utf-8 +# JB Mouret - 2009 + +""" +Quick n dirty mpi detection +""" + +import os, glob, types +import Options, Configure + + +def detect_mpi(conf): + env = conf.env + opt = Options.options + + conf.env['LIB_MPI'] = '' + conf.env['MPI_FOUND'] = False + if Options.options.no_mpi : + return + if Options.options.mpi: + conf.env['CPPPATH_MPI'] = Options.options.mpi + '/include' + conf.env['LIBPATH_MPI'] = Options.options.mpi + '/lib' + else: + conf.env['CPPPATH_MPI'] = ['/usr/include/mpi', '/usr/local/include/mpi', '/usr/include', '/usr/local/include'] + conf.env['LIBPATH_MPI'] = ['/usr/lib', '/usr/local/lib', '/usr/lib/openmpi'] + + res = Configure.find_file('mpi.h', conf.env['CPPPATH_MPI'] ) + conf.check_message('header','mpi.h', (res != '') , res) + if (res == '') : + return 0 + conf.env['MPI_FOUND'] = True + conf.env['LIB_MPI'] = ['mpi_cxx','mpi'] + return 1 + +def detect(conf): + return detect_mpi(conf) + +def set_options(opt): + opt.add_option("--no-mpi", + default=False, action='store_true', + help='disable mpi', dest='no_mpi') + opt.add_option('--mpi', type='string', help='path to mpi', dest='mpi') diff --git a/modules/dnns_easily_fooled/sferes/ode.py b/modules/dnns_easily_fooled/sferes/ode.py new file mode 100644 index 000000000..4f3cb6752 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/ode.py @@ -0,0 +1,32 @@ + +#! /usr/bin/env python +# encoding: utf-8 +# JB Mouret - 2009 + +""" +Quick n dirty ODE detection +""" + +import os, glob, types +import Options, Configure, config_c + +import commands + +def detect_ode(conf): + env = conf.env + opt = Options.options + ret = conf.find_program('ode-config') + conf.check_message_1('Checking for ODE (optional)') + if not ret: + conf.check_message_2('not found', 'YELLOW') + return 0 + conf.check_message_2('ok') + res = commands.getoutput('ode-config --cflags --libs') + config_c.parse_flags(res, 'ODE', env) + return 1 + +def detect(conf): + return detect_ode(conf) + +def set_options(opt): + pass diff --git a/modules/dnns_easily_fooled/sferes/scripts/add_license.sh b/modules/dnns_easily_fooled/sferes/scripts/add_license.sh new file mode 100644 index 000000000..24892b216 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/scripts/add_license.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -x +PATHS='sferes scripts tests modules/nn modules/cartpole' +FILES=`find $PATHS |egrep "(.hpp|.cpp)$"` +for i in $FILES; do + grep -v "//|" $i >/tmp/file + cp scripts/license_cpp.txt $i + cat /tmp/file >> $i +done + +FILES=`find $PATHS |egrep "(wscript|.py)$"` +FILES="$FILES wscript" +for i in $FILES; do + grep -v "#|" $i|grep -v "#!" 
>/tmp/file + cat scripts/license_py.txt >> $i + cat /tmp/file >> $i +done diff --git a/modules/dnns_easily_fooled/sferes/scripts/error_parser.rb b/modules/dnns_easily_fooled/sferes/scripts/error_parser.rb new file mode 100755 index 000000000..614de75e5 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/scripts/error_parser.rb @@ -0,0 +1,72 @@ +#!/usr/bin/ruby +## error_parser.rb +## Login : +## Started on Fri Apr 4 12:28:15 2008 mandor +## $Id$ +## +## Copyright (C) 2008 mandor + + + +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. +#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + + + +def parse(line, level) + stack = [""] + if !line.include?("instantiated") and (line.include?("error") || line.include?("erreur")) then + line.scan(/./) { |c| + if c == '<' then + print "#{stack[0]}<" if stack.length < level + 2 + stack[0] = "" + stack.unshift("") + elsif c == '>' then + print stack[0] if stack.length < level + 2 + stack.shift() + print ">" + else + stack[0] += c if stack[0] + end + } + print stack[0] if stack[0] + print "\n" + + end +end + +level = ARGV[0].to_i +while $stdin.gets do + parse($_, level) +end diff --git a/modules/dnns_easily_fooled/sferes/scripts/license_cpp.txt b/modules/dnns_easily_fooled/sferes/scripts/license_cpp.txt new file mode 100644 index 000000000..275e58a82 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/scripts/license_cpp.txt @@ -0,0 +1,34 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. 
+//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + diff --git a/modules/dnns_easily_fooled/sferes/scripts/license_py.txt b/modules/dnns_easily_fooled/sferes/scripts/license_py.txt new file mode 100644 index 000000000..aeec49db9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/scripts/license_py.txt @@ -0,0 +1,35 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. +#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. 
+#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + diff --git a/modules/dnns_easily_fooled/sferes/setup.sh b/modules/dnns_easily_fooled/sferes/setup.sh new file mode 100755 index 000000000..2c8581bcd --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/setup.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Setup the exp/images experiment +cd exp/images/ +./build_wscript.sh +cd ../../ + +dir=$(echo $(pwd)) +#echo "Please set LOCAL_RUN in $dir/exp/images/settings.h" +#vim $dir/exp/images/settings.h +echo "Done setting up." diff --git a/modules/dnns_easily_fooled/sferes/sferes.py b/modules/dnns_easily_fooled/sferes/sferes.py new file mode 100644 index 000000000..29b60a94f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes.py @@ -0,0 +1,390 @@ +import sys, os +import subprocess +import commands + +json_ok = True +try: + import simplejson +except: + json_ok = False + print "WARNING simplejson not found some function may not work" + +import glob +#import xml.etree.cElementTree as etree +import Options + + + +def create_variants(bld, source, uselib_local, target, + uselib, variants, includes=". ../../", + cxxflags='', + json=''): + # the basic one + # tgt = bld.new_task_gen('cxx', 'program') + # tgt.source = source + # tgt.includes = includes + # tgt.uselib_local = uselib_local + # tgt.uselib = uselib + # tgt.target = target + # the variants + c_src = bld.path.abspath() + '/' + for v in variants: + # create file + suff = '' + for d in v.split(' '): suff += d.lower() + '_' + tmp = source.replace('.cpp', '') + src_fname = tmp + '_' + suff[0:len(suff) - 1] + '.cpp' + f = open(c_src + src_fname, 'w') + f.write("// THIS IS A GENERATED FILE - DO NOT EDIT\n") + for d in v.split(' '): f.write("#define " + d + "\n") + f.write("#line 1 \"" + c_src + source + "\"\n") + code = open(c_src + source, 'r') + for line in code: f.write(line) + bin_name = src_fname.replace('.cpp', '') + bin_name = os.path.basename(bin_name) + # create build + tgt = bld.new_task_gen('cxx', 'program') + tgt.source = src_fname + tgt.includes = includes + tgt.uselib_local = uselib_local + tgt.uselib = uselib + tgt.target = bin_name + tgt.cxxflags = cxxflags + + +def create_exp(name): + ws_tpl = """ +#! /usr/bin/env python +def build(bld): + obj = bld.new_task_gen('cxx', 'program') + obj.source = '@exp.cpp' + obj.includes = '. ../../' + obj.uselib_local = 'sferes2' + obj.uselib = '' + obj.target = '@exp' + obj.uselib_local = 'sferes2' +""" + os.mkdir('exp/' + name) + os.system("cp examples/ex_ea.cpp exp/" + name + "/" + name + ".cpp") + wscript = open('exp/' + name + "/wscript", "w") + wscript.write(ws_tpl.replace('@exp', name)) + + +def parse_modules(): + if (not os.path.exists("modules.conf")): + return [] + mod = open("modules.conf") + modules = [] + for i in mod: + if i[0] != '#' and len(i) != 1: + modules += ['modules/' + i[0:len(i)-1]] + return modules + +def qsub(conf_file): + tpl = """ +#! /bin/sh +#? 
nom du job affiche +#PBS -N @exp +#PBS -o stdout +#PBS -b stderr +#PBS -M @email +# maximum execution time +#PBS -l walltime=@wall_time +# mail parameters +#PBS -m abe +# number of nodes +#PBS -l nodes=@nb_cores:ppn=@ppn +#PBS -l pmem=5200mb -l mem=5200mb +export LD_LIBRARY_PATH=@ld_lib_path +exec @exec +""" + if os.environ.has_key('LD_LIBRARY_PATH'): + ld_lib_path = os.environ['LD_LIBRARY_PATH'] + else: + ld_lib_path = "''" + home = os.environ['HOME'] + print 'LD_LIBRARY_PATH=' + ld_lib_path + # parse conf + conf = simplejson.load(open(conf_file)) + exps = conf['exps'] + nb_runs = conf['nb_runs'] + res_dir = conf['res_dir'] + bin_dir = conf['bin_dir'] + wall_time = conf['wall_time'] + use_mpi = "false" + try: use_mpi = conf['use_mpi'] + except: use_mpi = "false" + try: nb_cores = conf['nb_cores'] + except: nb_cores = 1 + try: args = conf['args'] + except: args = '' + email = conf['email'] + if (use_mpi == "true"): + ppn = '1' + mpirun = 'mpirun' + else: + nb_cores = 1; + ppn = '8' + mpirun = '' + + for i in range(0, nb_runs): + for e in exps: + directory = res_dir + "/" + e + "/exp_" + str(i) + try: + os.makedirs(directory) + except: + print "WARNING, dir:" + directory + " not be created" + subprocess.call('cp ' + bin_dir + '/' + e + ' ' + directory, shell=True) + fname = home + "/tmp/" + e + "_" + str(i) + ".job" + f = open(fname, "w") + f.write(tpl + .replace("@exp", e) + .replace("@email", email) + .replace("@ld_lib_path", ld_lib_path) + .replace("@wall_time", wall_time) + .replace("@dir", directory) + .replace("@nb_cores", str(nb_cores)) + .replace("@ppn", ppn) + .replace("@exec", mpirun + ' ' + directory + '/' + e + ' ' + args)) + f.close() + s = "qsub -d " + directory + " " + fname + print "executing:" + s + retcode = subprocess.call(s, shell=True, env=None) + print "qsub returned:" + str(retcode) + +def loadleveler(conf_file): + tpl = """ +# @ job_name= +# @ output = $(job_name).$(jobid) +# @ error = $(output) +# @ job_type = serial +# @ class = +# @ resources=ConsumableMemory() ConsumableCpus() +# @ queue +export LD_LIBRARY_PATH= +cd +./ +""" + if os.environ.has_key('LD_LIBRARY_PATH'): + ld_lib_path = os.environ['LD_LIBRARY_PATH'] + else: + ld_lib_path = "''" + home = os.environ['HOME'] + print 'LD_LIBRARY_PATH=' + ld_lib_path + # parse conf + conf = simplejson.load(open(conf_file)) + jobname = conf['jobname'] + exps = conf['exps'] + nb_runs = conf['nb_runs'] + res_dir = conf['res_dir'] + bin_dir = conf['bin_dir'] + jobclass = conf['class'] + try: + memory=conf['memory'] + except: + memory=3000 + try: + cpu=conf['cpu'] + except: + cpu=1 + + for i in range(0, nb_runs): + for e in exps: + directory = res_dir + "/" + e + "/exp_" + str(i) + try: + os.makedirs(directory) + except: + print "WARNING, dir:" + directory + " cannot be created" + subprocess.call('cp ' + bin_dir + '/' + e + ' ' + directory, shell=True) + try: + os.makedirs(home+"/tmp") + except: + pass + fname = home + "/tmp/" + e + "_" + str(i) + ".job" + f = open(fname, "w") + f.write(tpl + .replace("", jobname) + .replace("", ld_lib_path) + .replace("", jobclass) + .replace("", directory) + .replace("", str(memory)) + .replace("", str(cpu)) + .replace("", e)) + f.close() + s = "llsubmit "+ fname + print "executing:" + s + retcode = subprocess.call(s, shell=True, env=None) + print "llsubmit returned:" + str(retcode) + + + + +def time_travel(conf_file): + print 'time_travel, conf = ' + conf_file + conf = simplejson.load(open(conf_file)) + dir = conf['dir'] + # get the diff + patch = glob.glob(dir + '/*.diff')[0].split('/') + 
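+ # The diff saved next to the results is named after the svn revision it was taken from
+ # (e.g. "1234.diff"); the lines below recover that revision from the filename, check out
+ # the matching sferes2 revision and re-apply the patch.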
patch = patch[len(patch) - 1] + version = patch[0:len(patch) - len('.diff')] + cwd = os.getcwd() + patch = cwd + '/' + dir + '/' + patch + # checkout + print 'svn co -r ' + version + os.system('cd ' + dir + ' && svn -r ' + version + ' co https://webia.lip6.fr:2004/svn/robur/sferes2') + os.system('cd ' + dir + '/sferes2 && patch -p0 < '+ patch) + os.chdir(cwd) + + +def get_gen(x): + g1 = x.split('_') + gen1 = int(g1[len(g1)-1]) + return gen1 + + +def get_exe(conf_file): + return os.path.split(conf_file.replace('.json', ''))[1] + + +def compare_gen(x, y): + return get_gen(x) - get_gen(y) + + +def kill(conf_file): + print 'kill, conf =' + conf_file + exe = get_exe(conf_file) + conf = simplejson.load(open(conf_file)) + machines = conf['machines'] + if conf['debug'] == 1: + exe += '_debug' + else: + exe += '_opt' + print 'kill '+ exe + for m in machines: + print m + s = "ssh -o CheckHostIP=false -f " + m + \ + " killall -9 " + exe + print s + os.system(s) + + + +def status(conf): + # parse configuration + print 'status, conf = ' + conf + conf = simplejson.load(open(conf)) + exp = conf['exp'] + dir = conf['dir'] + + exps = glob.glob(dir + '/exp_*/') + total = 0.0 + for i in exps: + glist = glob.glob(i + '*/gen_*') + glist.sort(cmp=compare_gen) + last = glist[len(glist) - 1] + last_gen = get_gen(last) + r = '' + try: + tree = etree.parse(last) + l = tree.find("//x/_pareto_front/item/px/_fit/_objs") + if l == None: + l = tree.find("//x/_best/px/_fit/_value") + total += float(l.text) + r = l.text + else: + l = l[1:len(l)] + total += float(l[0].text) + for k in l: + r += k.text + ' ' + print i + ' :\t' + str(last_gen) + '\t=> ' + r + except: + print "error" + total /= len(exps) + print "=> " + str(total) + + +def get_exp(conf_file): + conf = simplejson.load(open(conf_file)) + return conf['exp'].sub('exp/', '') + +def launch_exp(conf_file): + print '--- launch exp ---' + + # parse configuration + print 'launch, conf = ' + conf_file + conf = simplejson.load(open(conf_file)) + machines = conf['machines'] + nb_runs = conf['nb_runs'] + exp = conf['exp'] + directory = conf['dir'] + debug = conf['debug'] + + args = "" + if 'args' in conf : args=conf['args'] + print 'exp = ' + exp + print 'dir = ' + directory + print 'nb_runs = ' + str(nb_runs) + print 'debug = ' + str(debug) + print 'machines =' + str(machines) + print 'args =' + str(args) + + # copy binaries (debug and opt) & json file + exe = get_exe(conf_file) + try: + os.makedirs(directory + '/bin') + os.system('cp ' + 'build/default/' + exp +'/'+exe+ ' ' + directory + '/bin/' + exe + '_opt') + os.system('cp ' + 'build/debug/' + exp +'/'+exe+ ' ' + directory + '/bin/' + exe + '_debug') + print conf + print directory + os.system('cp ' + conf_file + ' ' + directory) + # create directories + for i in range(0, nb_runs * len(machines)): + os.makedirs(directory + '/exp_' + str(i)) + except: + print '/!\ files exist, I cannot replace them' + return + print 'dirs created' + + # make a svn diff + status, version = commands.getstatusoutput('svnversion') + if version[len(version)-1] == 'M': + version = version[0:len(version)-1] + os.system('svn diff >' + directory + '/' + version + '.diff') + print 'diff done [version=' + version + ']' + + # run on each machines + if debug == 1: + exe = exe + '_debug' + else: + exe = exe + '_opt' + if os.environ.has_key('LD_LIBRARY_PATH'): + ld_lib_path = os.environ['LD_LIBRARY_PATH'] + else: + ld_lib_path = "''" + k = 0 + pids = [] + for m in machines.iterkeys() : + pid = os.fork() + if (pid == 0): #son + for i in range(0, 
machines[m]): + if m == 'localhost': + s = "export LD_LIBRARY_PATH=" + ld_lib_path + \ + " && cd " + os.getcwd() + '/' + directory + '/exp_'+ str(i + k) + \ + " && " + os.getcwd() + '/' + directory + '/bin/' + exe + " " + args + \ + " 1> stdout 2> stderr" + else: + s = "ssh -o CheckHostIP=false " + m + \ + " 'export LD_LIBRARY_PATH=" + ld_lib_path + \ + " && cd " + os.getcwd() + '/' + directory + '/exp_'+ str(i + k) + \ + " && " + os.getcwd() + '/' + directory + '/bin/' + exe + " " + args + \ + " 1> stdout 2> stderr'" + print 'run ' + str(i + k) + ' on ' + m + print s + ret = subprocess.call(s, shell=True) + print "ret = " + str(ret) + exit(0) + pids += [pid] + k += machines[m] + print "waitpid..." + for i in pids: + os.waitpid(i, 0) diff --git a/modules/dnns_easily_fooled/sferes/sferes/dbg/dbg.cpp b/modules/dnns_easily_fooled/sferes/sferes/dbg/dbg.cpp new file mode 100644 index 000000000..b93418489 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/dbg/dbg.cpp @@ -0,0 +1,1279 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +/* + * File: dbg.cpp + * Author: Pete Goodliffe + * Version: 1.10 + * Created: 7 June 2001 + * + * Purpose: C++ debugging support library + * + * Copyright (c) Pete Goodliffe 2001-2002 (pete@cthree.org) + * + * This file is modifiable/redistributable under the terms of the GNU + * Lesser General Public License. + * + * You should have recieved a copy of the GNU General Public License along + * with this program; see the file COPYING. If not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 0211-1307, USA. 
+ */ + +#ifndef DBG_ENABLED +#define DBG_ENABLED +#endif + +#include "dbg.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MAX_RETADDR 30 +void print_stack() { + void* retaddr[MAX_RETADDR]; + char **funcnames; + + int stackdepth; + + stackdepth = backtrace(retaddr, MAX_RETADDR); + funcnames = backtrace_symbols(retaddr, stackdepth); + + int i = 0; + while (i < stackdepth) { + std::cout << funcnames[i] << std::endl; + i++; + } +} + +/********************************************************************** + * Implementation notes + ********************************************************************** + * Tested and found to work ok under + * - gcc 2.96 + * - gcc 3.0 + * - gcc 3.1 + * - gcc 3.2 + * - bcc32 5.5.1 + * - MSVC 6.0 + * + * MSVC v6.0 + * - This platform makes me cry. + * - There are NUMEROUS hacks around it's deficient behaviour, just + * look for conditional complation based around _MSC_VER + * - The header doesn't put all the definitions into the std + * namespace. + * - This means that we have to sacrifice our good namespace-based code + * for something more disgusting and primitve. + * - Where this has happened, and where in the future I'd really like to + * put the "std" namespace back in, I have instead used a STDCLK macro. + * See the implementation comment about this below for more grief. + * - A documented hack has been made in the dbg.h header file, of slightly + * less ghastly proportions. See dbgclock_t there. + * - Additionally, the dbg::array_size template utility could be (and was) + * more elegantly be written: + * template + * inline unsigned int array_size(T (&array)[size]) + * { + * return size; + * } + * Of course, MSVC doesn't like that. Sigh. The version in dbg.h also + * works, its just not quite so nice. + * - The map implentation of MSVC doesn't provide data_type, so I have to + * hack around that. + * - The compiler doesn't like the dbg_ostream calling it's parent + * constructor by the name "ostream", it doesn't recognise the typedef. + * Ugh. + * + * Other thoughts: + * - Break out to debugger facility? + * - Only works for ostreams, not all basic_ostreams + * - Post-conditions are a bit limited, this is more of a C++ + * language limitation, really. + *********************************************************************/ + +/****************************************************************************** + * Tedious compiler-specific issues + *****************************************************************************/ + +// Work around MSVC 6.0 +#ifdef _MSC_VER +#define STDCLK +#pragma warning(disable:4786) +#else +// In an ideal world, the following line would be +// namespace STDCLK = std; +// However, gcc 2.96 doesn't seem to cope well with namespace aliases. +// Sigh. 
+#define STDCLK std +#endif + +// Quieten tedius build warnings on Borland C++ compiler +#ifdef __BCPLUSPLUS__ +#pragma warn -8066 +#pragma warn -8071 +#pragma warn -8070 +#endif + +/****************************************************************************** + * General dbg library private declarations + *****************************************************************************/ + +namespace { + /************************************************************************** + * Constants + *************************************************************************/ + + const char *LEVEL_NAMES[] = { + "info", + "warning", + "error", + "fatal", + "tracing", + "debug", + "none", + "all" + }; + const char *BEHAVIOUR_NAMES[] = { + "assertions_abort", + "assertions_throw", + "assertions_continue" + }; + enum constraint_type { + why_assertion, + why_sentinel, + why_unimplemented, + why_check_ptr + }; + + const char *TRACE_IN = "->"; + const char *TRACE_OUT = "<-"; + const char *INDENT = " "; + const char *PREFIX = "*** "; + const char *TRUE_STRING = "true"; + const char *FALSE_STRING = "false"; + const unsigned int ALL_SOURCES_MASK = 0xff; + const unsigned int NUM_DBG_LEVELS = dbg::all-1; + + /************************************************************************** + * Internal types + *************************************************************************/ + + /** + * Period information about a particular @ref source_pos. Used if + * assertion periods are enabled. + * + * @internal + */ + struct period_data { + size_t no_triggers; + STDCLK::clock_t triggered_at; + + period_data(); + }; + + /** + * Functor to provide comparison of @ref dbg::source_pos structs. + * + * @internal + */ + struct lt_sp { + bool operator()(const dbg::source_pos &a, const dbg::source_pos &b) + const { + if (a.file == b.file) { + if (a.func == b.func) { + return a.line < b.line; + } else { + return a.func < b.func; + } + } else { + return a.file < b.file; + } + } + }; + + /** + * A handy std::streambuf that sends its input to multiple output streams. + * + * This is the cornerstone of the dbg output stream magic. + * + * The multiple streams are recorded in a std::vector. The vector is + * external to this class, and maintained by the @ref dbg_ostream which + * uses this streambuf. + * + * @internal + */ + class dbg_streambuf : public std::streambuf { + public: + + dbg_streambuf(std::vector &ostreams, int bsize = 0); + ~dbg_streambuf(); + + int pubsync() { + return sync(); + } + + protected: + + int overflow(int); + int sync(); + + private: + + void put_buffer(void); + void put_char(int); + + std::vector &ostreams; + }; + + /** + * A handy streambuf that swallows its output and doesn't burp. + * + * This is used for disabled diagnostic output streams. + * + * There is a single stream object created using this streambuf, + * null_ostream. + * + * @internal + */ + class null_streambuf : public std::streambuf { + public: + + null_streambuf() {} + ~null_streambuf() {} + + protected: + + int overflow(int) { + return 0; + } + int sync() { + return 0; + } + }; + + /** + * This class provides an ostream using the @ref dbg_streambuf. It manages + * the vector on which the @ref dbg_streambuf relies. It provides methods + * to add/remove and clear streams. + * + * There are some minor hacks to support MSVC which cause small headaches. 
+ * + * @internal + */ + class dbg_ostream : public std::ostream { + public: + +#ifndef _MSC_VER + dbg_ostream() : std::ostream(&dbg_buf), dbg_buf(streams) {} + dbg_ostream(const dbg_ostream &rhs) + : std::ostream(&dbg_buf), streams(rhs.streams), + dbg_buf(streams) {} +#else + // MSVC workaround. Sigh. It won't let us call the parent ctor as + // "ostream" - it doesn't like the use of a typedef. On the other + // hand gcc 2.96 doesn't provide basic_ostream, so I can't call the + // base basic_ostream<> class there. + dbg_ostream() + : std::basic_ostream(&dbg_buf), dbg_buf(streams) {} + dbg_ostream(const dbg_ostream &rhs) + : std::basic_ostream(&dbg_buf), streams(rhs.streams), + dbg_buf(streams) {} +#endif + ~dbg_ostream() { + dbg_buf.pubsync(); + } + + void add(std::ostream &o); + void remove(std::ostream &o); + void clear(); + + private: + + dbg_ostream &operator=(const dbg_ostream&); + + typedef std::vector stream_vec_type; + + stream_vec_type streams; + dbg_streambuf dbg_buf; + }; + + /** + * The source_info class holds all the information associated with a + * debugging source. That is, the level enable settings and the + * set of output streams for each level. + * + * source_info objects are held in the @ref source_map. The default + * constructor magically takes settings from the @ref dbg::default_source. + * This actually makes the constructor code a little tortuous, but it's + * worth it. + * + * @internal + */ + class source_info { + public: + + /** + * The source_info can either be constructed as a copy of the + * default_source or not. The only time we choose not to is + * when constructing default_source! + */ + enum ConstructionStyle { + ConstructTheDefaultSource = 0, + ConstructCopyOfDefaultSource = 1 + }; + + source_info(ConstructionStyle cs = ConstructCopyOfDefaultSource); + source_info(const source_info &rhs); + ~source_info(); + + /** + * Enable or disable the given level, depending on the value + * of the boolean. + */ + void enable(dbg::level lvl, bool enable); + + /** + * Returns whether or not the level is enabled. + */ + bool enabled(dbg::level lvl) const { + return (levels & dbg_source_mask(lvl)) != 0; + } + + /** + * Add an ostream to catch output at the specified level. + */ + void add_ostream(dbg::level lvl, std::ostream &o); + + /** + * Remove an ostream from the specified level. + */ + void remove_ostream(dbg::level lvl, std::ostream &o); + + /** + * Clear all ostreams from the given level. + */ + void clear_ostream(dbg::level lvl); + + /** + * Return a stream to write to for the given level, or the + * @ref null_ostream if the level is not enabled for this source. + */ + std::ostream &out(dbg::level lvl); + + private: + + /** + * Creates a unsigned int mask that is used as the second value + * of the sources map. + */ + static unsigned int dbg_source_mask(dbg::level lvl) { + return (lvl != dbg::all) ? 1 << lvl : ALL_SOURCES_MASK; + } + + unsigned int levels; + + // We do a placement new of the dbg_streams array. + // It looks somewhat tacky, but it allows us to have a single + // constructor, which simplifies the client interface of this class. + // It specifically avoids tonnes of grotesque unused dbg_ostream + // constructions, as you'd create an array, and then copy the + // default_source elements directly over these freshly constructed + // elements. dbg_ostream is complex enough that this matters. 
+ // If we didn't have a clever cloning constructor, these lines + // would just be "dbg_ostream dbg_streams[NUM_DBG_LEVELS];" and + // we'd suffer a whole load of redundant dbg_ostream constructions. + /* + typedef dbg_ostream array_type[NUM_DBG_LEVELS]; + dbg_ostream *dbg_streams; + unsigned char raw_dbg_streams[sizeof(array_type)]; + */ + struct array_type { + // I wrap this up in an enclosing struct to make it obvious + // how to destroy the array "in place". To be honest I couldn't + // figure the syntax for the corresponding delete for a + // placement new array constrution. + dbg_ostream dbg_streams[NUM_DBG_LEVELS]; + }; + dbg_ostream *dbg_streams; + unsigned char raw_dbg_streams[sizeof(array_type)]; + + array_type &raw_cast() { + return *reinterpret_cast(raw_dbg_streams); + } + const array_type &raw_cast() const { + return *reinterpret_cast(raw_dbg_streams); + } + }; + + /** + * This class provides a "map" type for the source_map. It's a thin veneer + * around a std::map. The only reason for it's existance is the ctor which + * creates the default source_info, and inserts it into the map. + * + * Only the member functions that are needed below have been implemented, + * each as forwards to the std::map methods. + * + * @internal + */ + class source_map_type { + public: + + typedef std::map map_type; + typedef map_type::iterator iterator; + typedef map_type::key_type key_type; + typedef source_info data_type; + source_map_type() { + // Insert the default_source into the map + _map.insert( + std::make_pair(dbg::default_source, + source_info(source_info::ConstructTheDefaultSource))); + // Insert the unnamed source into the map too + _map.insert( + std::make_pair(dbg::dbg_source(""), + source_info(source_info::ConstructTheDefaultSource))); + } + iterator begin() { + return _map.begin(); + } + iterator end() { + return _map.end(); + } + data_type &operator[](key_type key) { + return _map[key]; + } + + private: + + map_type _map; + }; + + typedef std::map period_map_type; + + /************************************************************************** + * Internal variables + *************************************************************************/ + + // The stream to write to when no output is required. + std::ostream null_ostream(new null_streambuf()); + + dbg::assertion_behaviour behaviour[dbg::all+1] = { + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort, + dbg::assertions_abort + }; + + unsigned int indent_depth = 0; + std::string indent_prefix = PREFIX; + bool level_prefix = false; + bool time_prefix = false; + STDCLK::clock_t period = 0; + source_map_type source_map; + period_map_type period_map; + + /************************************************************************** + * Function declarations + *************************************************************************/ + + /** + * Prints a source_pos to the given ostream. + */ + void print_pos(std::ostream &out, const dbg::source_pos &where); + + /** + * Prints a source_pos to the given ostream in short format + * (suitable for trace). + */ + void print_pos_short(std::ostream &out, const dbg::source_pos &where); + + /** + * Prints period information to the given ostream, if a period has been + * enabled. + */ + void print_period_info(std::ostream &out, const dbg::source_pos &where); + + /** + * Does whatever the assertion_behaviour is set to. If an assertion + * is triggered, then this will be called. 
+ */ + void do_assertion_behaviour(dbg::level lvl, constraint_type why, + const dbg::source_pos &pos); + + /** + * Produces a level prefix for the specified level to the + * given ostream. + */ + void do_prefix(dbg::level lvl, std::ostream &s); + + /** + * Used by period_allows below. + */ + bool period_allows_impl(const dbg::source_pos &where); + + /** + * Returns whether the period allows the constraint at the specified + * @ref dbg::source_pos to trigger. This presumes that the assertion + * at this position has shown to be broken already. + * + * This is a small inline function to make code that uses the period + * implementation easier to read. + */ + inline bool period_allows(const dbg::source_pos &where) { + return !period || period_allows_impl(where); + } + + /** + * Given a dbg_source (which could be a zero pointer, or a source string) + * and the source_pos (which could contain a DBG_SOURCE defintion, or + * zero), work out what the dbg_source name to use is. + */ + void determine_source(dbg::dbg_source &src, const dbg::source_pos &here); +} + + +/****************************************************************************** + * Miscellaneous public bobbins + *****************************************************************************/ + +dbg::dbg_source dbg::default_source = "dbg::private::default_source"; + + +/****************************************************************************** + * Enable/disable dbg facilities + *****************************************************************************/ +void dbg::init() { + // debug + enable_level_prefix(true); + enable_all(dbg::all, true); + set_prefix("|"); + +} + +void dbg::enable(dbg::level lvl, bool enabled) { + out(debug) << prefix(debug) << "dbg::enable(" << LEVEL_NAMES[lvl] + << "," << (enabled ? TRUE_STRING : FALSE_STRING) << ")\n"; + + source_map[""].enable(lvl, enabled); +} + + +void dbg::enable(dbg::level lvl, dbg::dbg_source src, bool enabled) { + out(debug) << prefix(debug) << "dbg::enable(" << LEVEL_NAMES[lvl] + << ",\"" << src << "\"," + << (enabled ? TRUE_STRING : FALSE_STRING) << ")\n"; + + source_map[src].enable(lvl, enabled); +} + + +void dbg::enable_all(dbg::level lvl, bool enabled) { + out(debug) << prefix(debug) << "dbg::enable_all(" + << LEVEL_NAMES[lvl] << "," + << (enabled ? 
TRUE_STRING : FALSE_STRING) << ")\n"; + + source_map_type::iterator i = source_map.begin(); + for ( ; i != source_map.end(); ++i) { + (i->second).enable(lvl, enabled); + } +} + + +/****************************************************************************** + * Logging + *****************************************************************************/ + +std::ostream &dbg::out(dbg::level lvl, dbg::dbg_source src) { + if (!src) + return (source_map[""].out(lvl)<= 0 && index >= bound + && period_allows(here)) { + std::ostream &o = out(lvl, src); + o << indent(lvl) << "index " << index << " is out of bounds (" + << bound << ") at "; + print_pos(o, here); + print_period_info(o, here); + o << "\n"; + + do_assertion_behaviour(lvl, why_check_ptr, here); + } +} + + +/****************************************************************************** + * Tracing + *****************************************************************************/ + +dbg::trace::trace(func_name_t name) + : m_src(0), m_name(name), m_pos(DBG_HERE), m_triggered(false) { + determine_source(m_src, m_pos); + + if (source_map[m_src].enabled(dbg::tracing)) { + trace_begin(); + } +} + + +dbg::trace::trace(dbg_source src, func_name_t name) + : m_src(src), m_name(name), m_pos(DBG_HERE), m_triggered(false) { + determine_source(m_src, m_pos); + + if (source_map[m_src].enabled(dbg::tracing)) { + trace_begin(); + } +} + + +dbg::trace::trace(const source_pos &where) + : m_src(0), m_name(0), m_pos(where), m_triggered(false) { + determine_source(m_src, m_pos); + + if (source_map[m_src].enabled(dbg::tracing)) { + trace_begin(); + } +} + + +dbg::trace::trace(dbg_source src, const source_pos &where) + : m_src(src), m_name(0), m_pos(where), m_triggered(false) { + determine_source(m_src, m_pos); + + if (source_map[src].enabled(dbg::tracing)) { + trace_begin(); + } +} + + +dbg::trace::~trace() { + if (m_triggered) { + trace_end(); + } +} + + +void dbg::trace::trace_begin() { + std::ostream &o = out(dbg::tracing, m_src); + o << indent(tracing); + indent_depth++; + o << TRACE_IN; + if (m_name) { + o << m_name; + } else { + print_pos_short(o, m_pos); + } + // if (m_src && strcmp(m_src, "")) + // { + // o << " (for \"" << m_src << "\")"; + // } + o << std::endl; + + m_triggered = true; +} + + +void dbg::trace::trace_end() { + std::ostream &o = out(dbg::tracing, m_src); + indent_depth--; + o << indent(tracing); + o << TRACE_OUT; + if (m_name) { + o << m_name; + } else { + print_pos_short(o, m_pos); + } + // if (m_src && strcmp(m_src, "")) + // { + // o << " (for \"" << m_src << "\")"; + // } + o << std::endl; +} + + +/****************************************************************************** + * Internal implementation + *****************************************************************************/ + +namespace { + /************************************************************************** + * dbg_streambuf + *************************************************************************/ + + dbg_streambuf::dbg_streambuf(std::vector &o, int bsize) + : ostreams(o) { + if (bsize) { + char *ptr = new char[bsize]; + setp(ptr, ptr + bsize); + } else { + setp(0, 0); + } + setg(0, 0, 0); + } + + dbg_streambuf::~dbg_streambuf() { + sync(); + delete [] pbase(); + } + + int dbg_streambuf::overflow(int c) { + put_buffer(); + if (c != EOF) { + if (pbase() == epptr()) { + put_char(c); + } else { + sputc(c); + } + } + return 0; + } + + int dbg_streambuf::sync() { + put_buffer(); + return 0; + } + + void dbg_streambuf::put_buffer(void) { + if (pbase() != pptr()) { + 
std::vector::iterator i = ostreams.begin(); + while (i != ostreams.end()) { + (*i)->write(pbase(), pptr() - pbase()); + ++i; + } + setp(pbase(), epptr()); + } + } + + void dbg_streambuf::put_char(int c) { + std::vector::iterator i = ostreams.begin(); + while (i != ostreams.end()) { + (**i) << static_cast(c); + ++i; + } + } + + + /************************************************************************** + * dbg_ostream + *************************************************************************/ + + void dbg_ostream::add(std::ostream &o) { + if (std::find(streams.begin(), streams.end(), &o) == streams.end()) { + streams.push_back(&o); + } + } + + void dbg_ostream::remove(std::ostream &o) { + stream_vec_type::iterator i + = std::find(streams.begin(), streams.end(), &o); + if (i != streams.end()) { + streams.erase(i); + } + } + + void dbg_ostream::clear() { + streams.clear(); + } + + + /************************************************************************** + * source_info + *************************************************************************/ + + source_info::source_info(ConstructionStyle cs) + : levels(cs ? source_map[dbg::default_source].levels : 0), + dbg_streams(raw_cast().dbg_streams) { + if (cs) { + new (raw_dbg_streams) + array_type(source_map[dbg::default_source].raw_cast()); + } else { + new (raw_dbg_streams) array_type; + // add cerr to the error and fatal levels. + add_ostream(dbg::error, std::cerr); + add_ostream(dbg::fatal, std::cerr); + } + } + + source_info::source_info(const source_info &rhs) + : levels(rhs.levels), dbg_streams(raw_cast().dbg_streams) { + new (raw_dbg_streams) array_type(rhs.raw_cast()); + } + + source_info::~source_info() { + raw_cast().~array_type(); + } + + void source_info::enable(dbg::level lvl, bool status) { + levels &= ~dbg_source_mask(lvl); + if (status) { + levels |= dbg_source_mask(lvl); + } + } + + void source_info::add_ostream(dbg::level lvl, std::ostream &o) { + if (lvl == dbg::all) { + for (unsigned int n = 0; n < NUM_DBG_LEVELS; ++n) { + dbg_streams[n].add(o); + } + } else { + dbg_streams[lvl].add(o); + } + } + + void source_info::remove_ostream(dbg::level lvl, std::ostream &o) { + if (lvl == dbg::all) { + for (unsigned int n = 0; n < NUM_DBG_LEVELS; ++n) { + dbg_streams[n].remove(o); + } + } else { + dbg_streams[lvl].remove(o); + } + } + + void source_info::clear_ostream(dbg::level lvl) { + if (lvl == dbg::all) { + for (unsigned int n = 0; n < NUM_DBG_LEVELS; ++n) { + dbg_streams[n].clear(); + } + } else { + dbg_streams[lvl].clear(); + } + } + + std::ostream &source_info::out(dbg::level lvl) { + if (lvl == dbg::none || !enabled(lvl)) { + return null_ostream; + } else { + return dbg_streams[lvl]; + } + } + + + /************************************************************************** + * period_data + *************************************************************************/ + + period_data::period_data() + : no_triggers(0), triggered_at(STDCLK::clock() - period*2) { + } + + + /************************************************************************** + * Functions + *************************************************************************/ + + void print_pos(std::ostream &out, const dbg::source_pos &where) { + if (where.file) { + if (where.func) { + out << "function: " << where.func << ", "; + } + out << "line: " << where.line << ", file: " << where.file; + } + } + + void print_pos_short(std::ostream &out, const dbg::source_pos &where) { + if (where.file) { + if (where.func) { + out << where.func << " (" << where.line + << " in " << 
where.file << ")"; + } else { + out << "function at (" << where.line + << " in " << where.file << ")"; + } + } + } + + void print_period_info(std::ostream &out, const dbg::source_pos &where) { + if (period) { + size_t no_triggers = period_map[where].no_triggers; + out << " (triggered " << no_triggers << " time"; + if (no_triggers > 1) { + out << "s)"; + } else { + out << ")"; + } + } + } + + void do_assertion_behaviour(dbg::level lvl, constraint_type why, + const dbg::source_pos &pos) { + switch (lvl != dbg::fatal ? behaviour[lvl] : dbg::assertions_abort) { + case dbg::assertions_abort: { + abort(); + break; + } + case dbg::assertions_throw: { + switch (why) { + default: + case why_assertion: { + throw dbg::assertion_exception(pos); + break; + } + case why_sentinel: { + throw dbg::sentinel_exception(pos); + break; + } + case why_unimplemented: { + throw dbg::unimplemented_exception(pos); + break; + } + case why_check_ptr: { + throw dbg::check_ptr_exception(pos); + break; + } + } + break; + } + case dbg::assertions_continue: + default: { + break; + } + } + } + + void do_prefix(dbg::level lvl, std::ostream &s) { + + if (level_prefix) { + switch (lvl) { + case dbg::info: { + s << COL_GREEN<< "info: "< +#include +#include +#include + +#ifndef DBG_COLORS +#define DBG_COLORS +#define COL_RED "\033[1;33;41m" +#define COL_GREEN "\033[32m" +#define COL_BLACK "\033[30m" +#define COL_ORANGE "\033[33m" +#define COL_BLUE "\033[34m" +#define COL_MAGENTA "\033[35m" +#define COL_CYAN "\033[36m" +#define END_COLOR "\033[m" +#endif + +#ifndef _MSC_VER +#include +#else +// The start of a MSVC compatibility disaster area. +// See the documentation for the dbgclock_t type. +#include +#endif + +#if defined(DBG_ENABLED) && defined(NDEBUG) +//#warning DBG_ENABLED defined with NDEBUG which do you want? +#endif + +/** + * @libdoc dbg library + * + * The dbg library is a set of C++ utilities to facilitate modern debugging + * idioms. + * + * It has been designed to support defensive programming techniques in modern + * C++ code. It integrates well with standard library usage and has been + * carefully designed to be easy to write, easy to read and very easy to use. + * + * It provides various constraint checking utilities together with an + * integrated error logging facility. These utilities are flexible and + * customisable. They can be enabled and disabled at runtime, and in release + * builds, dbg library use can be compiled away to nothing. + * + * Rich debugging can only be implemented in large code bases from the outset, + * it is hard to retrofit full defensive programming techniques onto existant + * code. For this reason it is good practice to use a library like dbg when + * you start a new project. By using dbg extensively you will find bugs + * quicker, and prevent more insideous problems rearing their head later in + * the project's life. + * + * For instructions on the dbg library's use see the @ref dbg namespace + * documentation. + */ + + + +/** + * The dbg namespace holds a number of C++ debugging utilities. + * + * They allow you to include constraint checking in your code, and provide + * an integrated advanced stream-based logging facility. 
+ * + * The characteristics of this library are: + * @li Easy to use (not overly complex) + * (easy to write, easy to read, easy to use) + * @li Powerful + * @li Configurable + * @li No run time overhead when "compiled out" + * @li Minimises use of the proprocessor + * @li Can throw exceptions if required + * @li Can separate different "sources" of diagnostic output (these + * sources are differentated by name) + * @li Designed to be a "standard" library + * (integrates with the style of the C++ standard library and works + * well with it) + * + * @sect Enabling debugging + * + * To use dbg in your program you must #include <dbg.h> + * and compile with the DBG_ENABLED flag set. + * + * If you build without DBG_ENABLED you will have no debugging support (neither + * constraints nor logging). There is no overhead building a program using + * these utilities when DBG_ENABLED is not set. Well, actually there might be + * minimal overhead: there is no overhead when using gcc with a little + * optimisation (-O3). There is a few bytes overhead with + * optimisation disabled. (The -O1 level leaves almost no + * overhead.) + * + * Either way, the rich debugging support is probably worth a few bytes. + * + * Once your program is running, you will want to enable diagnostic + * levels with @ref dbg::enable, and probably attach an ostream (perhaps + * cerr) to the diagnostic outputs. See the default states section + * below for information on the initial state of dbg. + * + * Aside: + * The standard assert macro is an insideous little devil, a lower + * case macro. This library replaces it and builds much richer constraints + * in its place. + * However, because of it, we have to use an API name dbg::assertion, + * not dbg::assert - this makes me really cross, but I can't assume that the + * user does not #include <assert.h> when using + * <dbg.h> . + * + * @sect Using constraints + * + * The dbg library constraints are very easy to use. Each debugging utility is + * documented fully to help you understand how they work. Here are some simple + * examples of library use for run-time constraint checking: + *

+ *     void test_dbg()
+ *     {
+ *         dbg::trace trace(DBG_HERE);
+ *
+ *         int  i   = 5;
+ *         int *ptr = &i;
+ *
+ *         dbg::assertion(DBG_ASSERTION(i != 6));
+ *         dbg::check_ptr(ptr, DBG_HERE);
+ *
+ *         if (i == 5)
+ *         {
+ *             return;
+ *         }
+ *
+ *         // Shouldn't get here
+ *         dbg::sentinel(DBG_HERE);
+ *     }
+ * 
+ * + * The constraints provided by dbg are: + * @li @ref dbg::assertion - General purpose assertion + * (a better assert) + * @li @ref dbg::sentinel - Marker for "shouldn't get here" points + * @li @ref dbg::unimplemented - Marks unimplemented code + * @li @ref dbg::check_ptr - Zero pointer check + * @li @ref dbg::check_bounds - Array bounds checking + * @li @ref dbg::post_mem_fun - Member function post condition + * @li @ref dbg::post - General function post condition + * @li @ref dbg::compile_assertion - Compile time assertion + * + * You can modify constriant behaviour with: + * @li @ref dbg::set_assertion_behaviour - Set how contraints behave + * @li @ref dbg::set_assertion_period - Set up trigger periods + * + * See their individual documentation for further details on usage. + * + * You can specify whether constraints merely report a warning, cause + * an exception to be thrown, or immediately abort the program (see + * @ref dbg::assertion_behaviour). + * + * For assertions that may fire many times in a tight loop, there is the + * facility to time-restrict output (see @ref dbg::set_assertion_period) + * + * @sect Using logging + * + * All the constraint checking shown above integrates with the dbg library + * stream logging mechanisms. These logging facilities are open for your use as + * well. + * + * Here is a simlpe example of this: + *
+ *     dbg::attach_ostream(dbg::info, cout);
+ *     // now all 'info' messages go to cout
+ *
+ *     dbg::out(dbg::info)    << "This is some info I want to print out\n";
+ *
+ *     dbg::out(dbg::tracing) << dbg::indent()
+ *                            << "This is output at 'tracing' level, indented "
+ *                            << "to the same level as the current tracing "
+ *                            << "indent.\n";
+ * 
+ * + * When you build without the DBG_ENABLED flag specified, these logging + * messages will compile out to nothing. + * + * The logging is a very flexible system. You can attach multiple ostreams + * to any dbg output, so you can easily log to a file and log to the console, + * for example. The output can be formatted in a number of different ways to + * suit your needs. + * + * The logging mechanisms provide you with the ability to prepend to all + * diagnostic output a standard prefix (see @ref dbg::set_prefix), and + * also to add the diagnostic level and current time to the prefix (see + * @ref dbg::enable_level_prefix and @ref dbg::enable_time_prefix). + * + * The logging facilities provide by dbg include: + * @li @ref dbg::enable - Enable/disable activity + * @li @ref dbg::out - Returns a diagnostic ostream + * @li @ref dbg::attach_ostream - Attach an ostream to diagnostic output + * @li @ref dbg::detach_ostream - Detach an ostream to diagnostic output + * @li @ref dbg::trace - Trace entry/exit points + * + * The output formatting utilities include: + * @li @ref dbg::set_prefix - Sets the diagnostic output "margin" + * @li @ref dbg::enable_level_prefix - More information in messages + * @li @ref dbg::enable_time_prefix - Prints time in messages + * + * @sect Diagnostic sources + * + * The dbg library allows you to differentiate different "sources" of logging. + * + * Each of the debug utilities has a second form in which you can supply + * a string describing the source of the diagnostic output (see + * @ref dbg::dbg_source). This source may be a different software component, a + * separate file - whatever granularity you like! + * + * If you don't specify a @ref dbg::dbg_source then you are working with the + * ordinary "unnamed" source. + * + * Using these forms you can filter out diagnostics from the different + * parts of your code. Each source can also be attached to a different set of + * streams (logging each component to a separate file, for example). The + * filtering is rich - you can selectively filter each different diagnostic + * @ref dbg::level for each @ref dbg::dbg_source. For example, + * + *
+ *     dbg::enable(dbg::all, "foo-driver", true);
+ *     dbg::enable(dbg::all, "bar-driver", false);
+ *
+ *     int i = 5;
+ *     dbg::assertion("foo-driver", DBG_ASSERTION(i != 6));
+ *     dbg::assertion("bar-driver", DBG_ASSERTION(i != 6));
+ * 
+ * + * This will trigger an assertion for the "foo-driver" but not the + * "bar-driver". + * + * There is no requirement to "register" a @ref dbg::dbg_source. The first + * time you use it in any of the dbg APIs, it will be registered with the dbg + * library. It comes into an existance as a copy of the "default" + * debugging sourcei, @ref dbg::default_source. + * + * The default source initially has all debug levels disabled. + * You can change that with this call. Note that this function + * only affects sources created after the call is made. + * Existing sources are unaffected. + * + * If you don't know all of the @ref dbg::dbg_source sources currently + * available, you can blanket enable/disable them with @ref dbg::enable_all. + * + * It can be tedious to specify the @ref dbg_source in every dbg call in + * a source file. For this reason, you can specify the DBG_SOURCE compile + * time macro (wherever you specify DBG_ENABLED). When set, the calls + * automatically recieve the source name via the DBG_HERE macro (see + * @ref dbg::source_pos for details). If DBG_SOURCE is supplied but you call + * a dbg API with a specific named @ref dbg_source, this name will override + * the underlying DBG_SOURCE name. + * + * @sect Overloads + * + * Each constraint utility has a number of overloaded forms. This is to make + * using them more convenient. The most rich overload allows you to specify + * a diagnostic @ref dbg::level and a @ref dbg::dbg_source. There are other + * versions that omit one of these parameters, assuming a relevant default. + * + * @sect Default states + * + * When your program first starts up the dbg library has all debugging levels + * switched off. You can enable debugging with @ref dbg::enable. All of the + * possible @ref dbg::dbg_source enables are also all off for all + * levels. You can enable these with @ref dbg::enable, or @ref dbg::enable_all. + * + * Initially, the std::cerr stream is attached to the + * @ref dbg::error and @ref dbg::fatal diagnostic levels. You can + * attach ostreams to the other diagnostic levels with @ref + * dbg::attach_ostream. + * + * You can modify the "default state" of newly created debug sources. To do + * this use the special @ref dbg::default_source source name in calls to + * @ref dbg::enable, @ref dbg::attach_ostream, and and @ref detach_ostream. + * New sources take the setup from this template source. + * + * All assertion levels are set to @ref dbg::assertions_abort at first, like + * the standard library's assert macro. You can change this behaviour with + * @ref dbg::set_assertion_behaviour. There are no timeout periods set - you + * can change this with @ref dbg::set_assertion_period. + * + * @short Debugging utilities + * @author Pete Goodliffe + * @version 1.0 + */ +namespace dbg { + /** + * This is the version number of the dbg library. + * + * The value is encoded as version * 100. This means that 100 represents + * version 1.00, for example. + */ + const int version = 110; + + /************************************************************************** + * Debugging declarations + *************************************************************************/ + + /** + * The various predefined debugging levels. The dbg API calls use these + * levels as parameters, and allow the user to sift the less interesting + * debugging levels out through @ref dbg::enable. 
+ * + * These levels (and their intended uses) are: + * @li info - Informational, just for interest + * @li warning - For warnings, bad things but recoverable + * @li error - For errors that can't be recovered from + * @li fatal - Errors at this level will cause the dbg library to abort + * program execution, no matter what the + * @ref assertion_behaviour is set to + * @li tracing - Program execution tracing messages + * @li debug - Messages about the state of dbg library, you cannot + * generate messages at this level + * @li none - For APIs that use 'no level specified' + * @li all - Used in @ref enable and @ref attach_ostream to + * specify all levels + */ + enum level { + info, + warning, + error, + fatal, + tracing, + debug, + none, + all + }; + + /** + * This enum type describes what happens when a debugging assertion + * fails. The behaviour can be: + * @li assertions_abort - Assertions cause a program abort + * @li assertions_throw - Assertions cause a @ref dbg_exception to + * be thrown + * @li assertions_continue - Assertions cause the standard diagnostic + * printout to occur (the same as the above + * behaviours) but execution continues + * regardless + * + * The dbg library defaults to assertions_abort behaviour, like the + * standard C assert. + * + * @see dbg::set_assertion_behaviour + */ + enum assertion_behaviour { + assertions_abort, + assertions_throw, + assertions_continue + }; + + /** + * typedef for a string that describes the "source" of a diagnostic. If + * you are working on a large project with many small code modules you may + * only want to enable debugging from particular source modules. This + * typedef facilitiates this. + * + * Depending on the desired granularity of your dbg sources you will use + * different naming conventions. For example, your dbg_sources might + * be filenames, that way you can switch off all debugging output from + * a particular file quite easily. It might be device driver names, + * component names, library names, or even function names. It's up to you. + * + * If you provide the DBG_SOURCE macro definition at compile time, then + * the DBG_HERE macro includes this source name, differentiating the + * sources for you automatically. + * + * @see dbg::enable(level,dbg_source,bool) + * @see dbg::enable_all + */ + typedef const char * dbg_source; + + /************************************************************************** + * source_pos + *************************************************************************/ + + /** + * Typedef used in the @ref source_pos data structure. + * + * Describes a line number in a source file. + * + * @see dbg::source_pos + */ + typedef const unsigned int line_no_t; + + /** + * Typedef used in the @ref source_pos data structure. + * + * Describes a function name in a source file. (Can be zero to + * indicate the function name cannot be assertained on this compiler). + * + * @see dbg::source_pos + */ + typedef const char * func_name_t; + + /** + * Typedef used in the @ref source_pos data structure. + * + * Describes a filename. + * + * @see dbg::source_pos + */ + typedef const char * file_name_t; + + /** + * Data structure describing a position in the source file. That is, + * @li The line number + * @li The function name (if the compiler supports this) + * @li The filename + * @li The @ref dbg_soruce specified by DBG_SOURCE compilation + * parameter, if any (otherwise zero) + * + * To create a source_pos for the current position, you can use + * the DBG_HERE convenience macro. 
+ * + * There is an empty constructor that allows you to create a source_pos + * that represents 'no position specified'. + * + * This structure should only be used in dbg library API calls. + * + * You can print a source_pos using the usual stream manipulator syntax. + */ + struct source_pos { + line_no_t line; + func_name_t func; + file_name_t file; + dbg_source src; + + /** + * Creates a source_pos struct. Use the DBG_HERE macro to + * call this constructor conveniently. + */ + source_pos(line_no_t ln, func_name_t fn, file_name_t fl, dbg_source s) + : line(ln), func(fn), file(fl), src(s) {} + + /** + * A 'null' source_pos for 'no position specified' + */ + source_pos() + : line(0), func(0), file(0), src(0) {} + }; + +#ifndef _MSC_VER + /** + * The dbgclock_t typedef is an unfortunate workaround for comptability + * purposes. One (unnamed) popular compiler platform supplies a + * header file, but this header does NOT place the contents + * into the std namespace. + * + * This typedef is the most elegant work around for that problem. It is + * conditionally set to the appropriate clock_t definition. + * + * In an ideal world this would not exist. + * + * This is the version for sane, standards-compliant platforms. + */ + typedef std::clock_t dbgclock_t; +#else + /** + * See dbgclock_t documentation above. This is the version for broken + * compiler platforms. + */ + typedef clock_t dbgclock_t; +#endif + + /************************************************************************** + * Exceptions + *************************************************************************/ + + /** + * The base type of exception thrown by dbg assertions (and other dbg + * library constraint checks) if the @ref assertion_behaviour is set to + * assertions_throw. + * + * The exception keeps a record of the source position of the trigger + * for this exception. + */ + struct dbg_exception : public std::exception { + dbg_exception(const source_pos &p) : pos(p) {} + const source_pos pos; + }; + + /** + * The type of exception thrown by @ref assertion. + * + * @see assertion + */ + struct assertion_exception : public dbg_exception { + assertion_exception(const source_pos &p) : dbg_exception(p) {} + }; + + /** + * The type of exception thrown by @ref sentinel. + * + * @see sentinel + */ + struct sentinel_exception : public dbg_exception { + sentinel_exception(const source_pos &p) : dbg_exception(p) {} + }; + + /** + * The type of exception thrown by @ref unimplemented. + * + * @see unimplemented + */ + struct unimplemented_exception : public dbg_exception { + unimplemented_exception(const source_pos &p) : dbg_exception(p) {} + }; + + /** + * The type of exception thrown by @ref check_ptr. + * + * @see check_ptr + */ + struct check_ptr_exception : public dbg_exception { + check_ptr_exception(const source_pos &p) : dbg_exception(p) {} + }; + +#ifdef DBG_ENABLED + + /************************************************************************** + * default_source + *************************************************************************/ + + /** + * The name of a "template" debugging source that provides the default + * state for newly created sources. You can attach and detach logging + * streams here, and enable/disable logging levels. + * + * All source state is copied from the default_source to a new dbg_source. + * + * Whilst you can also use this source for diagnostic purposes this isn't + * it's intention, and it would be confusing to do so. 
+ * + * See @ref dbg_source for discussion on the use of debugging sources in + * dbg. + * + * @see dbg_source + */ + extern dbg_source default_source; + + /************************************************************************** + * Debug version of the DBG_HERE macro + *************************************************************************/ + + /* + * DBG_FUNCTION is defined to be a macro that expands to the name of + * the current function, or zero if the compiler is unable to supply that + * information. It's sad that this wasn't included in the C++ standard + * from the very beginning. + */ +#if defined(__GNUC__) +#define DBG_FUNCTION __FUNCTION__ +#else +#define DBG_FUNCTION 0 +#endif + +#if !defined(DBG_SOURCE) +#define DBG_SOURCE 0 +#endif + + /* + * Handy macro to generate a @ref source_pos object containing the + * information of the current source line. + * + * @see dbg::source_pos + */ +#define DBG_HERE \ + (::dbg::source_pos(__LINE__, DBG_FUNCTION, __FILE__, DBG_SOURCE)) + + /************************************************************************** + * Enable/disable dbg facilities + *************************************************************************/ + + /** + * Enables or disables a particular debugging level. The affects dbg + * library calls which don't specify a @ref dbg_source, i.e. from the + * unnamed source. + * + * Enabling affects both constraint checking and diagnostic log output. + * + * If you enable a debugging level twice you only need to disable it once. + * + * All diagnostic output is initially disabled. You can easily enable + * output in your main() thus: + *
+   *     dbg::enable(dbg::all, true);
+   * 
+ * + * Note that if dbg library calls do specify a @ref dbg_source, or you + * provide a definition for the DBG_SOURCE macro on compilation, then you + * will instead need to enable output for that particular source. Use the + * overloaded version of enable. This version of enable doesn't affect + * these other @ref dbg_source calls. + * + * @param lvl Diagnostic level to enable/disable + * @param enabled true to enable this diagnostic level, false to disable it + * @see dbg::enable_all + * @see dbg::out + * @see dbg::attach_ostream + */ + void enable(level lvl, bool enabled); + + /** + * In addition to the above enable function, this overloaded version is + * used when you use dbg APIs with a @ref dbg_source specified. For these + * versions of the APIs no debugging will be performed unless you + * enable it with this API. + * + * To enable debugging for the "foobar" diagnostic source at the info + * level you need to do the following: + *
+   *     dbg::enable(dbg::info, "foobar", true);
+   * 
+ * + * If you enable a level for a particular @ref dbg_source twice you only + * need to disable it once. + * + * @param lvl Diagnostic level to enable/disable for the @ref dbg_source + * @param src String describing the diagnostic source + * @param enabled true to enable this diagnostic level, false to disable it + * @see dbg::out + */ + void enable(level lvl, dbg_source src, bool enabled); + + /** + * You may not know every single @ref dbg_source that is generating + * debugging in a particular code base. However, using this function + * you can enable a diagnostic level for all currently registered sources + * in one fell swoop. + * + * For example, + *
+   *     dbg::enable_all(dbg::all, true);
+   * 
+ */ + void enable_all(level lvl, bool enabled); + + /************************************************************************** + * Logging + *************************************************************************/ + + /** + * Returns an ostream suitable for sending diagnostic messages to. + * Each diagnostic level has a different logging ostream which can be + * enabled/disabled independantly. In addition, each @ref dbg_source + * has separate enables/disables for each diagnostic level. + * + * This overloaded version of out is used when you are creating diagnostics + * that are tied to a particular @ref dbg_source. + * + * It allows you to write code like this: + *
+   *     dbg::out(dbg::info, "foobar") << "The foobar is flaky\n";
+   * 
+ * + * If you want to prefix your diagnostics with the standard dbg library + * prefix (see @ref set_prefix) then use the @ref prefix or @ref indent + * stream manipulators. + * + * @param lvl Diagnostic level get get ostream for + * @param src String describing the diagnostic source + */ + std::ostream &out(level lvl, dbg_source src); + + /** + * Returns an ostream suitable for sending diagnostic messages to. + * Each diagnostic level has a different logging ostream which can be + * enabled/disabled independantly. + * + * You use this version of out when you are creating diagnostics + * that aren't tidied to a particular @ref dbg_source. + * + * Each diagnostic @ref dbg_source has a separate set of streams. + * This function returns the stream for the "unnamed" source. Use the + * overload below to obtain the stream for a named source. + * + * It allows you to write code like this: + *
+   *     dbg::out(dbg::info) << "The code is flaky\n";
+   * 
+ * + * If you want to prefix your diagnostics with the standard dbg library + * prefix (see @ref set_prefix) then use the @ref prefix or @ref indent + * stream manipulators. + * + * @param lvl Diagnostic level get get ostream for + */ + inline std::ostream &out(level lvl) { + return out(lvl, 0); + } + + /** + * Attaches the specified ostream to the given diagnostic level + * for the "unnamed" debug source. Now when diagnostics are produced + * at that level, this ostream will recieve a copy. + * + * You can attach multiple ostreams to a diagnostic level. Be careful + * that they don't go to the same place (e.g. cout and cerr both going + * to your console) - this might confuse you! + * + * If you attach a ostream mutiple times it will only receive one + * copy of the diagnostics, and you will only need to call + * @ref detach_ostream once. + * + * Remember, don't destroy the ostream without first removing it from + * dbg libary, or Bad Things will happen. + * + * @param lvl Diagnostic level + * @param o ostream to attach + * @see dbg::detach_ostream + * @see dbg::detach_all_ostreams + */ + void attach_ostream(level lvl, std::ostream &o); + + /** + * Attaches the specified ostream to the given diagnostic level + * for the specified debug source. Otherwise, similar to + * @ref dbg::attach_ostream above. + * + * @param lvl Diagnostic level + * @param src Debug source + * @param o ostream to attach + * @see dbg::detach_ostream + * @see dbg::detach_all_ostreams + */ + void attach_ostream(level lvl, dbg_source src, std::ostream &o); + + /** + * Detaches the specified ostream from the given diagnostic level. + * + * If the ostream was not attached then no error is generated. + * + * If you attached the ostream twice, one call to detach_ostream will + * remove it completely. + * + * @param lvl Diagnostic level + * @param o ostream to detach + * @see dbg::attach_ostream + * @see dbg::detach_all_ostreams + */ + void detach_ostream(level lvl, std::ostream &o); + + /** + * Detaches the specified ostream from the given diagnostic level + * for the specified debug source. Otherwise, similar to + * @ref dbg::detach_ostream above. + * + * @param lvl Diagnostic level + * @param src Debug source + * @param o ostream to detach + * @see dbg::attach_ostream + * @see dbg::detach_all_ostreams + */ + void detach_ostream(level lvl, dbg_source src, std::ostream &o); + + /** + * Detaches all attached ostreams from the specified diagnostic level + * for the "unnamed" diagnostic source. + * + * @param lvl Diagnostic level + * @see dbg::attach_ostream + * @see dbg::detach_ostream + */ + void detach_all_ostreams(level lvl); + + /** + * Detaches all attached ostreams from the specified diagnostic level + * for the specified debug source. Otherwise, similar to + * @ref dbg::detach_all_ostreams above. + * + * @param lvl Diagnostic level + * @see dbg::attach_ostream + * @see dbg::detach_ostream + */ + void detach_all_ostreams(level lvl, dbg_source src); + + /** + * Convenience function that returns the ostream for the info + * @ref dbg::level for the "unnamed" source. + * + * @see dbg::out + */ + inline std::ostream &info_out() { + return out(dbg::info); + } + + /** + * Convenience function that returns the ostream for the warning + * @ref dbg::level for the "unnamed" source. + * + * @see dbg::out + */ + inline std::ostream &warning_out() { + return out(dbg::warning); + } + + /** + * Convenience function that returns the ostream for the error + * @ref dbg::level for the "unnamed" source. 
+ * + * @see dbg::out + */ + inline std::ostream &error_out() { + return out(dbg::error); + } + + /** + * Convenience function that returns the ostream for the fatal + * @ref dbg::level for the "unnamed" source. + * + * @see dbg::out + */ + inline std::ostream &fatal_out() { + return out(dbg::fatal); + } + + /** + * Convenience function that returns the ostream for the tracing + * @ref dbg::level for the "unnamed" source. + * + * @see dbg::out + */ + inline std::ostream &trace_out() { + return out(dbg::tracing); + } + + /************************************************************************** + * Output formatting + *************************************************************************/ + + /** + * Sets the debugging prefix - the characters printed before any + * diagnostic output. Defaults to "*** ". + * + * @param prefix New prefix string + * @see dbg::prefix + * @see dbg::enable_level_prefix + * @see dbg::enable_time_prefix + */ + void set_prefix(const char *prefix); + + /** + * The dbg library can add to the @ref prefix the name of the used + * diagnostic level (e.g. info, fatal, etc). + * + * By default, this facility is disabled. This function allows you to + * enable the facility. + * + * @param enabled true to enable level prefixing, false to disable + * @see dbg::set_prefix + * @see dbg::enable_time_prefix + */ + void enable_level_prefix(bool enabled); + + /** + * The dbg library can add to the @ref prefix the current time. This + * can be useful when debugging systems which remain active for long + * periods of time. + * + * By default, this facility is disabled. This function allows you to + * enable the facility. + * + * The time is produced in the format of the standard library ctime + * function. + * + * @param enabled true to enable time prefixing, false to disable + * @see dbg::set_prefix + * @see dbg::enable_level_prefix + */ + void enable_time_prefix(bool enabled); + + /** + * Used so that you can produce a prefix in your diagnostic output in the + * same way that the debugging library does. + * + * You can use it in one of two ways: with or without a diagnostic + * @ref level. For the latter, if level prefixing is enabled (see + * @ref enable_level_prefix) then produces a prefix including the + * specified diagnostic level text. + * + * Examples of use: + * + *
+   *     dbg::out(dbg::info) << dbg::prefix()
+   *                         << "A Bad Thing happened\n";
+   *
+   *     dbg::out(dbg::info) << dbg::prefix(dbg::info)
+   *                         << "A Bad Thing happened\n";
+   * 
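+   *
+   * As a further illustrative sketch using the formatting calls described
+   * above, the prefix itself can be configured before it is used:
+   *
+   *     dbg::set_prefix("DBG> ");        // change the "margin" text
+   *     dbg::enable_level_prefix(true);  // add the diagnostic level name
+   *     dbg::enable_time_prefix(true);   // add the current time
+   *
+   *     dbg::out(dbg::warning) << dbg::prefix(dbg::warning)
+   *                            << "A Bad Thing happened\n";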
+ * + * @see dbg::indent + * @see dbg::set_prefix + * @see dbg::enable_level_prefix + * @see dbg::enable_time_prefix + */ + struct prefix { + /** + * Creates a prefix with no specified diagnostic @ref level. + * No diagnostic level text will be included in the prefix. + */ + prefix() : l(none) {} + + /** + * @param lvl Diagnostic @ref level to include in prefix + */ + prefix(level lvl) : l(lvl) {} + + level l; + }; + + /** + * This is called when you use the @ref prefix stream manipulator. + * + * @internal + * @see dbg::prefix + */ + std::ostream &operator<<(std::ostream &s, const prefix &p); + + /** + * Used so that you can indent your diagnostic output to the same level + * as the debugging library. This also produces the @ref prefix output. + * + * Examples of use: + * + *
+   *     dbg::out(dbg::info) << dbg::indent()
+   *                         << "A Bad Thing happened\n";
+   *
+   *     dbg::out(dbg::info) << dbg::indent(dbg::info)
+   *                         << "A Bad Thing happened\n";
+   * 
+ * + * @see dbg::prefix + * @see dbg::set_prefix + * @see dbg::enable_level_prefix + * @see dbg::enable_time_prefix + */ + struct indent { + /** + * Creates a indent with no specified diagnostic @ref level. + * No diagnostic level text will be included in the @ref prefix part. + */ + indent() : l(none) {} + + /** + * @param lvl Diagnostic level to include in prefix + */ + indent(level lvl) : l(lvl) {} + + level l; + }; + + /** + * This is called when you use the @ref indent stream manipulator. + * + * @internal + * @see dbg::indent + */ + std::ostream &operator<<(std::ostream &s, const indent &i); + + /** + * This is called when you send a @ref source_pos to a diagnostic output. + * You can use this to easily check the flow of execcution in your + * program. + * + * For example, + *
+   *     dbg::out(dbg::tracing) << DBG_HERE << std::endl;
+   * 
+ * + * Take care that you only send DBG_HERE to the diagnostic outputs + * (obtained with @ref dbg::out) and not "ordinary" streams like + * std::cout. + * + * In non debug builds, DBG_HERE is a "no-op" doing nothing, and so no + * useful output will be produced on cout. + * + * @internal + * @see dbg::indent + */ + std::ostream &operator<<(std::ostream &s, const source_pos &pos); + + /************************************************************************** + * Behaviour + *************************************************************************/ + + /** + * Sets what happens when assertions (or other constraints) trigger. There + * will always be diagnostic ouput. Assertions have 'abort' behaviour by + * default - like the ISO C standard, they cause an abort. + * + * If an assertion is encountered at the fatal level, the debugging library + * will abort the program regardless of this behaviour setting. + * + * If a diagnostic level is not enabled (see @ref enable) then the + * @ref assertion_behaviour is not enacted, and no output is produced. + * + * @param lvl Diagnostic level to set behaviour for + * @param behaviour Assertion behaviour + * @see dbg::set_assertion_period + * @see dbg::enable + * @see dbg::assertion + * @see dbg::sentinel + * @see dbg::unimplemented + * @see dbg::check_ptr + */ + void set_assertion_behaviour(level lvl, assertion_behaviour behaviour); + + /** + * You may want an assertion to trigger once only and then for subsequent + * calls to remain inactive. For example, if there is an @ref assertion in + * a loop you may not want diagnostics produced for each loop iteration. + * + * To do this, you do the following: + *
+   *      // Prevent several thousand diagnostic print outs
+   *      dbg::set_assertion_period(CLOCKS_PER_SEC);
+   *
+   *      // Example loop
+   *      int array[LARGE_VALUE];
+   *      put_stuff_in_array(array);
+   *      for(unsigned int n = 0; n < LARGE_VALUE; n++)
+   *      {
+   *          dbg::assertion(DBG_ASSERTION(array[n] != 0));
+   *          do_something(array[n]);
+   *      }
+   * 
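+   *
+   * Since a period only matters when execution carries on past a broken
+   * constraint, you would typically pair it with assertions_continue
+   * behaviour; a minimal sketch:
+   *
+   *      dbg::set_assertion_behaviour(dbg::warning, dbg::assertions_continue);
+   *      dbg::set_assertion_period(CLOCKS_PER_SEC);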
+ * + * set_assertion_period forces a certain time period between triggers of a + * particular constraint. The @ref assertion in the example above will only + * be triggered once a second (despite the fact that the constraint + * condition will be broken thousands of times a second). This will not + * affect any other @ref assertion - they will each have their own timeout + * periods. + * + * Setting a period of zero disables any constraint period. + * + * The default behaviour is to have no period. + * + * If a period is set then diagnostic printouts will include the number + * of times each constraint has been triggered (since the period was set). + * Using this, even if diagnostics don't always appear on the attached + * ostreams you have some indication of how often each constraint is + * triggered. + * + * This call only really makes sense if the @ref assertion_behaviour is + * set to @ref assertions_continue. + * + * @param period Time between triggerings of each assertion, or zero to + * disable + * @see dbg::set_assertion_behaviour + * @see dbg::assertion + * @see dbg::sentinel + * @see dbg::unimplemented + * @see dbg::check_ptr + */ + void set_assertion_period(dbgclock_t period); + + /************************************************************************** + * Assertion + *************************************************************************/ + + /** + * Describes an @ref assertion. + * + * This is an internal data structure, you do not need to create it + * directly. Use the DBG_ASSERTION macro to create it. + * + * @internal + * @see dbg::assertion + */ + struct assert_info : public source_pos { + bool asserted; + const char *text; + + /** + * Do not call this directly. Use the DBG_ASSERTION macro. + * + * @internal + */ + assert_info(bool a, const char *t, + line_no_t line, func_name_t func, + file_name_t file, dbg_source spos) + : source_pos(line, func, file, spos), asserted(a), text(t) {} + + /** + * Do not call this directly. Use the DBG_ASSERTION macro. + * + * @internal + */ + assert_info(bool a, const char *b, const source_pos &sp) + : source_pos(sp), asserted(a), text(b) {} + }; + + /* + * Utility macro used by the DBG_ASSERTION macro - it converts a + * macro parameter into a character string. + */ +#define DBG_STRING(a) #a + + /* + * Handy macro used by clients of the @ref dbg::assertion function. + * It use is described in the @ref assertion documentation. + * + * @see dbg::assertion + */ +#define DBG_ASSERTION(a) \ + ::dbg::assert_info(a, DBG_STRING(a), DBG_HERE) + + // PATCH by mandor + void init(); + + /** + * Used to assert a constraint in your code. Use the DBG_ASSERTION macro + * to generate the third parameter. + * + * This version creates an assertion bound to a particular @ref dbg_source. + * + * The assertion is the most general constraint utility - there are others + * which have more specific purposes (like @ref check_ptr to ensure a + * pointer is non-null). assertion allows you to test any boolean + * expression. + * + * To use assertion for a @ref dbg_source "foobar" you write code like: + *
+   *     int i = 0;
+   *     dbg::assertion(dbg::info, "foobar", DBG_ASSERTION(i != 0));
+   * 
+ * + * If you build with debugging enabled (see @ref dbg) the program will + * produce diagnostic output to the relevant output stream if the + * constraint fails, and the appropriate @ref assertion_behaviour + * is enacted. + * + * Since in non-debug builds the expression in the DBG_ASSERTION macro + * will not be evaluated, it is important that the expression has no + * side effects. + * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param ai assert_info structure created with DBG_ASSERTION + */ + void assertion(level lvl, dbg_source src, const assert_info &ai); + + /** + * Overloaded version of @ref assertion that is not bound to a particular + * @ref dbg_source. + * + * @param lvl Diagnostic level to assert at + * @param ai assert_info structure created with DBG_ASSERTION + */ + inline void assertion(level lvl, const assert_info &ai) { + assertion(lvl, 0, ai); + } + + /** + * Overloaded version of @ref assertion that defaults to the + * warning @ref level. + * + * @param src String describing the diagnostic source + * @param ai assert_info structure created with DBG_ASSERTION + */ + inline void assertion(dbg_source src, const assert_info &ai) { + assertion(warning, src, ai); + } + + /** + * Overloaded version of @ref assertion that defaults to the + * warning @ref level and is not bound to a particular @ref dbg_source. + * + * @param ai assert_info structure created with DBG_ASSERTION + */ + inline void assertion(const assert_info &ai) { + assertion(warning, 0, ai); + } + + /************************************************************************** + * Sentinel + *************************************************************************/ + + /** + * You should put this directly after a "should never get here" comment. + * + *
+   *      int i = 5;
+   *      if (i == 5)
+   *      {
+   *          std::cout << "Correct program behaviour\n";
+   *      }
+   *      else
+   *      {
+   *          dbg::sentinel(dbg::error, "foobar", DBG_HERE);
+   *      }
+   * 
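+   *
+   * If the behaviour for the level is set to assertions_throw, a triggered
+   * sentinel can be caught; for example (an illustrative sketch):
+   *
+   *      dbg::set_assertion_behaviour(dbg::error, dbg::assertions_throw);
+   *      try
+   *      {
+   *          dbg::sentinel(dbg::error, "foobar", DBG_HERE);
+   *      }
+   *      catch (const dbg::sentinel_exception &e)
+   *      {
+   *          // e.pos records where the sentinel triggered
+   *      }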
+ * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param here Supply DBG_HERE + */ + void sentinel(level lvl, dbg_source src, const source_pos &here); + + /** + * Overloaded version of @ref sentinel that is not bound to a particular + * @ref dbg_source. + * + * @param lvl Diagnostic level to assert at + * @param here Supply DBG_HERE + */ + inline void sentinel(level lvl, const source_pos &here) { + sentinel(lvl, 0, here); + } + + /** + * Overloaded version of @ref sentinel that defaults to the warning + * @ref level and is not bound to a particular @ref dbg_source. + * + * @param src String describing the diagnostic source + * @param here Supply DBG_HERE + */ + inline void sentinel(dbg_source src, const source_pos &here) { + sentinel(warning, src, here); + } + + /** + * Overloaded version of @ref sentinel that defaults to the warning + * @ref level and is not bound to a particular @ref dbg_source. + * + * @param here Supply DBG_HERE + */ + inline void sentinel(const source_pos &here) { + sentinel(warning, 0, here); + } + + /************************************************************************** + * Unimplemented + *************************************************************************/ + + /** + * You should put this directly after a "this has not been implemented + * (yet)" comment. + * + *
+   *      switch (variable)
+   *      {
+   *          ...
+   *          case SOMETHING:
+   *          {
+   *              dbg::unimplemented(dbg::warning, "foobar", DBG_HERE);
+   *              break;
+   *          }
+   *          ...
+   *      }
+   * 
+ * + * Note the "break;" above - if the @ref assertion_behaviour is non-fatal + * then execution will continue. You wouldn't want unintentional + * fall-through. + * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param here Supply DBG_HERE + * + */ + void unimplemented(level lvl, dbg_source src, const source_pos &here); + + /** + * Overloaded version of @ref unimplemented that is not bound to a + * particular @ref dbg_source. + * + * @param lvl Diagnostic level to assert at + * @param here Supply DBG_HERE + */ + inline void unimplemented(level lvl, const source_pos &here) { + unimplemented(lvl, 0, here); + } + + /** + * Overloaded version of @ref unimplemented that defaults to the + * warning @ref level. + * + * @param src String describing the diagnostic source + * @param here Supply DBG_HERE + */ + inline void unimplemented(dbg_source src, const source_pos &here) { + unimplemented(warning, src, here); + } + + /** + * Overloaded version of @ref unimplemented that defaults to the + * warning @ref level and is not bound to a particular @ref dbg_source. + * + * @param here Supply DBG_HERE + */ + inline void unimplemented(const source_pos &here) { + unimplemented(warning, 0, here); + } + + /************************************************************************** + * Pointer checking + *************************************************************************/ + + /** + * A diagnostic function to assert that a pointer is not zero. + * + * To use it you write code like: + *
+   *     void *p = 0;
+   *     dbg::check_ptr(dbg::info, "foobar", p, DBG_HERE);
+   * 
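+   *
+   * If you do not need to name a level or a @ref dbg_source, the simpler
+   * overloads below can be used in the same way:
+   *
+   *     dbg::check_ptr(p, DBG_HERE);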
+ * + * It's better to use this than a general purpose @ref assertion. It + * reads far more intuitively in your code. + * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param p Pointer to check + * @param here Supply DBG_HERE + */ + void check_ptr(level lvl, dbg_source src, void *p, const source_pos &here); + + /** + * Overloaded version of @ref check_ptr that is not bound to a particular + * @ref dbg_source. + * + * @param lvl Diagnostic level to assert at + * @param p Pointer to check + * @param here Supply DBG_HERE + */ + inline void check_ptr(level lvl, void *p, const source_pos &here) { + check_ptr(lvl, 0, p, here); + } + + /** + * Overloaded version of @ref check_ptr that defaults to the + * warning @ref level. + * + * @param src String describing the diagnostic source + * @param p Pointer to check + * @param here Supply DBG_HERE + */ + inline void check_ptr(dbg_source src, void *p, const source_pos &here) { + check_ptr(warning, src, p, here); + } + + /** + * Overloaded version of @ref check_ptr that defaults to the + * warning @ref level and is not bound to a particular @ref dbg_source. + * + * @param p Pointer to check + * @param here Supply DBG_HERE + */ + inline void check_ptr(void *p, const source_pos &here) { + check_ptr(warning, 0, p, here); + } + + /************************************************************************** + * Bounds checking + *************************************************************************/ + + /** + * Utility that determines the number of elements in an array. Used + * by the @ref check_bounds constraint utility function. + * + * This is not available in non-debug versions, so do not use it + * directly. + * + * @param array Array to determine size of + * @return The number of elements in the array + * @internal + */ + template + inline unsigned int array_size(T &array) { + return sizeof(array)/sizeof(array[0]); + } + + /** + * A diagnostic function to assert that an array access is not out + * of bounds. + * + * You probably want to use the more convenient check_bounds versions + * below if you are accessing an array whose definition is in scope - + * the compiler will then safely detrmine the size of the array for you. + * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param index Test index + * @param bound Boundary value (index must be < bound, and >= 0) + * @param here Supply DBG_HERE + */ + void check_bounds(level lvl, dbg_source src, + int index, int bound, const source_pos &here); + /** + * A diagnostic function to assert that an array access is not out + * of bounds. With this version you can specify the minimum and maximum + * bound value. + * + * You probably want to use the more convenient check_bounds version + * below if you are accessing an array whose definition is in scope - + * the compiler will then safely detrmine the size of the array for you. 
+ * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param index Test index + * @param minbound Minimum bound (index must be >= minbound + * @param maxbound Minimum bound (index must be < maxbound) + * @param here Supply DBG_HERE + */ + inline void check_bounds(level lvl, dbg_source src, + int index, int minbound, int maxbound, + const source_pos &here) { + check_bounds(lvl, src, index-minbound, maxbound, here); + } + + /** + * Overloaded version of check_bounds that can automatically determine the + * size of an array if it within the current scope. + * + * You use it like this: + *
+   *     int a[10];
+   *     int index = 10;
+   *     dbg::check_bounds(dbg::error, index, a, DBG_HERE);
+   *     a[index] = 5;
+   * 
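+   *
+   * A further illustrative sketch: the overload above that takes explicit
+   * minimum and maximum bounds asserts that an index lies in a half-open
+   * range. (Here "parser" is an assumed dbg_source string and
+   * get_offset() a hypothetical helper.)
+   *
+   *     int offset = get_offset();
+   *     // asserts 0 <= offset < 16
+   *     dbg::check_bounds(dbg::error, "parser", offset, 0, 16, DBG_HERE);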
+ * + * @param lvl Diagnostic level to assert at + * @param src String describing the diagnostic source + * @param index Test index + * @param array Array index is applied to + * @param here Supply DBG_HERE + */ + template <typename T> + void check_bounds(level lvl, dbg_source src, + int index, T &array, const source_pos &here) { + check_bounds(lvl, src, index, array_size(array), here); + } + + /** + * Overloaded version of @ref check_bounds that is not bound to a + * particular @ref dbg_source. + * + * @param lvl Diagnostic level to assert at + * @param index Test index + * @param array Array index is applied to + * @param here Supply DBG_HERE + */ + template <typename T> + void check_bounds(level lvl, int index, T &array, const source_pos &here) { + check_bounds(lvl, 0, index, array_size(array), here); + } + + /** + * Overloaded version of @ref check_bounds that defaults to the + * warning @ref level. + * + * @param src String describing the diagnostic source + * @param index Test index + * @param array Array index is applied to + * @param here Supply DBG_HERE + */ + template <typename T> + void check_bounds(dbg_source src, int index, T &array, + const source_pos &here) { + check_bounds(warning, src, index, array_size(array), here); + } + + /** + * Overloaded version of @ref check_bounds that defaults to the + * warning @ref level and is not bound to a particular @ref dbg_source. + * + * @param index Test index + * @param array Array index is applied to + * @param here Supply DBG_HERE + */ + template <typename T> + void check_bounds(int index, T &array, const source_pos &here) { + check_bounds(warning, 0, index, array_size(array), here); + } + + /************************************************************************** + * Tracing + *************************************************************************/ + + /** + * The trace class allows you to easily produce tracing diagnostics. + * + * When the ctor is called, it prints "->" and the name of the + * function, increasing the indent level. When the object is deleted + * it prints "<-" followed again by the name of the function. + * + * You can use the name of the current function gathered via the + * DBG_HERE macro, or some other tracing string you supply. + * + * Diagnostics are produced at the tracing @ref level. + * + * For example, if you write the following code: + * + *
+   *     void foo()
+   *     {
+   *         dbg::trace t1(DBG_HERE);
+   *         // do some stuff
+   *         {
+   *             dbg::trace t2("sub block");
+   *             // do some stuff
+   *             dbg::out(tracing) << dbg::prefix() << "Hello!\n";
+   *         }
+   *         dbg::out(tracing) << dbg::prefix() << "Hello again!\n";
+   *         // more stuff
+   *     }
+   * 
+ * + * You will get the following tracing information: + * + *
+   *     *** ->foo (0 in foo.cpp)
+   *     ***   ->sub block
+   *     ***     Hello!
+   *     ***   <-sub block
+   *     ***   Hello again!
+   *     *** <-foo (0 in foo.cpp)
+   * 
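+   *
+   * A trace can also be tied to a particular @ref dbg_source so that its
+   * output is controlled per source. An illustrative sketch ("parser" is
+   * an assumed dbg_source string):
+   *
+   *     void parse()
+   *     {
+   *         dbg::trace t("parser", DBG_HERE);
+   *         // ...
+   *     }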
+ * + * Don't forget to create named dbg::trace objects. If you create + * anonymous objects (i.e. you just wrote "dbg::trace(DBG_HERE);") + * then the destructor will be called immediately, rather than at the + * end of the block scope, causing invalid trace output. + * + * Tracing does not cause assertions to trigger, therefore you will + * never generate an abort or exception using this object. + * + * If you disable the tracing diagnostic @ref level before the trace + * object's destructor is called you will still get the closing trace + * output. This is important, otherwise the indentation level of the + * library would get out of sync. In this case, the closing diagnostic + * output will have a "note" attached to indicate what has happened. + * + * Similarly, if tracing diagnostics are off when the trace object is + * created, yet subsequently enabled before the destructor is called, there + * will be no closing tracing output. + */ + class trace { + public: + + /** + * Provide the function name, or some other tracing string. + * + * This will not tie the trace object to a particular + * @ref dbg_source. + * + * @param name Tracing block name + */ + trace(func_name_t name); + + /** + * @param src String describing the diagnostic source + * @param name Tracing block name + */ + trace(dbg_source src, func_name_t name); + + /** + * This will not tie the trace object to a particular + * @ref dbg_source. + * + * @param here Supply DBG_HERE + */ + trace(const source_pos &here); + + /** + * @param src String describing the diagnostic source + * @param here Supply DBG_HERE + */ + trace(dbg_source src, const source_pos &here); + + ~trace(); + + private: + + trace(const trace &); + trace &operator=(const trace &); + + void trace_begin(); + void trace_end(); + + dbg_source m_src; + const char *m_name; + const source_pos m_pos; + bool m_triggered; + }; + + /************************************************************************** + * Post conditions + *************************************************************************/ + + /** + * A post condition class. This utility automates the checking of + * post conditions using @ref assertion. It requires a member function + * with the signature: + *
+   *     bool some_class::invariant() const;
+   * 
+ * + * When you create a post_mem_fun object you specify a post condition + * member function. When the post_mem_fun object is destroyed the + * postcondition is asserted. + * + * This is useful for methods where there are a number of exit points + * which would make it tedious to put the same @ref dbg::assertion + * in multiple places. + * + * It is also handy when an exception might be thrown and propagated by a + * function, ensuring that a postcondition is first checked. Bear in mind + * that Bad Things can happen if the @ref assertion_behaviour is + * assertions_throw and this is triggered via a propagating exception. + * + * An example of usage: the do_test method below uses the post_mem_fun + * object: + *
+   *     class test
+   *     {
+   *         public:
+   *             test() : a(10) {}
+   *             void do_test()
+   *             {
+   *                 dbg::post_mem_fun<test>
+   *                    post(dbg::info, this, &test::invariant, DBG_HERE);
+   *                 a = 9;
+   *                 if (SOME_CONDITION)
+   *                 {
+   *                     return;                                      // (*)
+   *                 }
+   *                 else if (SOME_OTHER_CONDITION)
+   *                 {
+   *                     throw std::exception();                      // (*)
+   *                 }
+   *                                                                  // (*)
+   *             }
+   *         private:
+   *             bool invariant()
+   *             {
+   *                 return a == 10;
+   *             }
+   *             int a;
+   *     };
+   * 
+ * The post condition will be asserted at each point marked (*). + * + * @see dbg::post + */ + template + class post_mem_fun { + public: + + /** + * The type of the contraint function. It returns a bool and + * takes no parameters. + */ + typedef bool (obj_t::*fn_t)(); + + /** + * @param lvl Diagnostic level + * @param obj Object to invoke @p fn on (usually "this") + * @param fn Post condition member function + * @param here Supply DBG_HERE + */ + post_mem_fun(level lvl, obj_t *obj, fn_t fn, const source_pos &pos) + : m_lvl(lvl), m_src(0), m_obj(obj), m_fn(fn), m_pos(pos) {} + + /** + * @param lvl Diagnostic level + * @param src String describing the diagnostic source + * @param obj Object to invoke @p fn on (usually "this") + * @param fn Post condition member function + * @param here Supply DBG_HERE + */ + post_mem_fun(level lvl, dbg_source src, + obj_t *obj, fn_t fn, const source_pos &pos) + : m_lvl(lvl), m_src(src), m_obj(obj), m_fn(fn), m_pos(pos) {} + + /** + * Overloaded version of constructor which defaults to the + * @ref warning diagnostic level. + * + * @param obj Object to invoke @p fn on (usually "this") + * @param fn Post condition member function + * @param here Supply DBG_HERE + */ + post_mem_fun(obj_t *obj, fn_t fn, const source_pos &pos) + : m_lvl(dbg::warning), m_src(0), + m_obj(obj), m_fn(fn), m_pos(pos) {} + + /** + * Overloaded version of constructor which defaults to the + * @ref warning diagnostic level. + * + * @param src String describing the diagnostic source + * @param obj Object to invoke @p fn on (usually "this") + * @param fn Post condition member function + * @param here Supply DBG_HERE + */ + post_mem_fun(dbg_source src, obj_t *obj, fn_t fn, + const source_pos &pos) + : m_lvl(dbg::warning), m_src(src), + m_obj(obj), m_fn(fn), m_pos(pos) {} + + /** + * The destructor asserts the post condition. + */ + ~post_mem_fun() { + assertion(m_lvl, m_src, + assert_info((m_obj->*m_fn)(), "post condition", + m_pos.line, m_pos.func, m_pos.file, m_pos.src)); + } + + private: + + const level m_lvl; + const dbg_source m_src; + obj_t *m_obj; + fn_t m_fn; + const source_pos m_pos; + }; + + /** + * A post condition class. Unlike @ref post_mem_fun, this class + * calls a non-member function with signature: + *
+   *     bool some_function();
+   * 
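+   *
+   * An illustrative sketch (queue_not_empty() and drain() are hypothetical
+   * names used only for this example):
+   *
+   *     bool queue_not_empty();
+   *
+   *     void drain()
+   *     {
+   *         dbg::post p(dbg::error, &queue_not_empty, DBG_HERE);
+   *         // ... several return paths; the post condition is asserted
+   *         // whenever p goes out of scope
+   *     }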
+ * + * Otherwise, use it identically to the @ref post_mem_fun. + * + * @see dbg::post_mem_fun + */ + class post { + public: + + /** + * The type of the contraint function. It returns a bool and + * takes no parameters. + */ + typedef bool (*fn_t)(); + + /** + * @param lvl Diagnostic level + * @param fn Post condition function + * @param here Supply DBG_HERE + */ + post(level lvl, fn_t fn, const source_pos &pos) + : m_lvl(lvl), m_src(0), m_fn(fn), m_pos(pos) {} + + /** + * @param lvl Diagnostic level + * @param src String describing the diagnostic source + * @param fn Post condition function + * @param here Supply DBG_HERE + */ + post(level lvl, dbg_source src, fn_t fn, const source_pos &pos) + : m_lvl(lvl), m_src(src), m_fn(fn), m_pos(pos) {} + + /** + * Overloaded version of constructor which defaults to the + * @ref warning diagnostic level. + * + * @param fn Post condition function + * @param here Supply DBG_HERE + */ + post(fn_t fn, const source_pos &pos) + : m_lvl(dbg::warning), m_src(0), m_fn(fn), m_pos(pos) {} + + /** + * Overloaded version of constructor which defaults to the + * @ref warning diagnostic level. + * + * @param src String describing the diagnostic source + * @param fn Post condition function + * @param here Supply DBG_HERE + */ + post(dbg_source src, fn_t fn, const source_pos &pos) + : m_lvl(dbg::warning), m_src(src), m_fn(fn), m_pos(pos) {} + + /** + * The destructor asserts the post condition. + */ + ~post() { + assertion(m_lvl, m_src, + assert_info(m_fn(), "post condition", + m_pos.line, m_pos.func, m_pos.file, m_pos.src)); + } + + private: + + level m_lvl; + const dbg_source m_src; + fn_t m_fn; + const source_pos m_pos; + }; + + /************************************************************************** + * Compile time assertions + *************************************************************************/ + + /** + * If we need to assert a constraint that can be calculated at compile + * time, then it would be advantageous to do so - moving error detection + * to an earlier phase in development is always a Good Thing. + * + * This utility allows you to do this. You use it like this: + * + *
+   *     enum { foo = 4, bar = 6 };
+   *     compile_assertion<(foo > bar)>();
+   * 
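+   *
+   * Another illustrative use is asserting a property of a type at
+   * compile time:
+   *
+   *     compile_assertion<(sizeof(int) >= 2)>();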
+ * + * There is a particular point to observe here. Although the + * expression is now a template parameter, it is important to contain it + * in parentheses. This is simply because the expression contains a ">" + * which otherwise would be taken by the compiler to be the closing of + * the template parameter. Although not all expressions require this, + * it is good practice to do it at all times. + */ + template + class compile_assertion; + template <> + class compile_assertion {}; + +#else + + /************************************************************************** + * Non-debug stub versions + *************************************************************************/ + + /* + * With debugging switched off we generate null versions of the above + * definitions. + * + * Given a good compiler and a strong prevailing headwind, these will + * optimise away to nothing. + */ + +#define DBG_HERE ((void*)0) +#define DBG_ASSERTION(a) ((void*)0) + + //enum { default_source = 0xdead }; + const dbg_source default_source = 0; + + /** + * In non-debug versions, this class is used to replace an ostream + * so that code will compile away. Do not use it directly. + * + * @internal + */ + class null_stream { + public: +#ifdef _MSC_VER + null_stream &operator<<(void *) { + return *this; + } + null_stream &operator<<(const void *) { + return *this; + } + null_stream &operator<<(long) { + return *this; + } +#else + template + null_stream &operator<<(const otype &) { + return *this; + } +#endif + + template + null_stream &operator<<(otype &) { + return *this; + } + null_stream &operator<<(std::ostream& (*)(std::ostream&)) { + return *this; + } + }; + + struct prefix { + prefix() {} prefix(level) {} + }; + struct indent { + indent() {} indent(level) {} + }; + + inline void enable(level, bool) {} + inline void enable(level, dbg_source, bool) {} + inline void enable_all(level, bool) {} + inline null_stream out(level, dbg_source) { + return null_stream(); + } + inline null_stream out(level) { + return null_stream(); + } + inline void attach_ostream(level, std::ostream &) {} + inline void attach_ostream(level, dbg_source, std::ostream &) {} + inline void detach_ostream(level, std::ostream &) {} + inline void detach_ostream(level, dbg_source, std::ostream &) {} + inline void detach_all_ostreams(level) {} + inline void detach_all_ostreams(level, dbg_source) {} + inline null_stream info_out() { + return null_stream(); + } + inline null_stream warning_out() { + return null_stream(); + } + inline null_stream error_out() { + return null_stream(); + } + inline null_stream fatal_out() { + return null_stream(); + } + inline null_stream trace_out() { + return null_stream(); + } + inline void set_prefix(const char *) {} + inline void enable_level_prefix(bool) {} + inline void enable_time_prefix(bool) {} + + inline void set_assertion_behaviour(level, assertion_behaviour) {} + inline void set_assertion_period(dbgclock_t) {} + inline void assertion(level, dbg_source, void *) {} + inline void assertion(level, void *) {} + inline void assertion(dbg_source, void *) {} + inline void assertion(void *) {} + inline void sentinel(level, dbg_source, void *) {} + inline void sentinel(level, void *) {} + inline void sentinel(dbg_source, void *) {} + inline void sentinel(void *) {} + inline void unimplemented(level, dbg_source, void *) {} + inline void unimplemented(level, void *) {} + inline void unimplemented(dbg_source, void *) {} + inline void unimplemented(void *) {} + inline void check_ptr(level, dbg_source, void *, void *) {} + 
inline void check_ptr(level, void *, void *) {} + inline void check_ptr(dbg_source, void *, void *) {} + inline void check_ptr(void *, void *) {} + inline void check_bounds(level, void *, int, int, void *) {} + inline void check_bounds(level, dbg_source, int, void*, void*) {} + inline void check_bounds(level, dbg_source, int, int, + void *, void *) {} + inline void check_bounds(level, int, void *, void*) {} + inline void check_bounds(void *, int, void *, void *) {} + inline void check_bounds(int, void *, void *) {} + inline void init() {} + class trace { + public: + trace(const char *fn_name) {} + trace(dbg_source, const char *fn_name) {} + trace(void *here) {} + trace(dbg_source, void *here) {} + ~trace() {} + }; + + template + class post_mem_fun { + public: + typedef bool (obj_t::*fn_t)(); + post_mem_fun(level, void *, fn_t, void *) {} + post_mem_fun(level, dbg_source, void *, fn_t, void *) {} + post_mem_fun(void *, fn_t, void *) {} + post_mem_fun(dbg_source, void *, fn_t, void *) {} + ~post_mem_fun() {} + }; + class post { + public: + typedef bool(*fn_t)(); + post(level, fn_t, void *) {} + post(level, dbg_source, fn_t, void *) {} + post(fn_t, void *) {} + post(dbg_source, fn_t, void *) {} + ~post() {} + }; + + template + class compile_assertion {}; + +#endif +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.cpp b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.cpp new file mode 100644 index 000000000..e0ae6f8db --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.cpp @@ -0,0 +1,2978 @@ +/* --------------------------------------------------------- */ +/* --- File: cmaes.c -------- Author: Nikolaus Hansen --- */ +/* --------------------------------------------------------- */ +/* + CMA-ES for non-linear function minimization. + + Copyright 1996, 2003, 2007, 2013 Nikolaus Hansen + e-mail: hansen .AT. lri.fr + + This program is free software; you can redistribute it and/or modify + it under the terms of either the GNU General Public License, version 2, + or, the GNU Lesser General Public License, version 2.1 or later, as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + + See . + +*/ +/* --- Changes : --- + 03/03/21: argument const double *rgFunVal of + cmaes_ReestimateDistribution() was treated incorrectly. + 03/03/29: restart via cmaes_resume_distribution() implemented. + 03/03/30: Always max std dev / largest axis is printed first. + 03/08/30: Damping is adjusted for large mueff. + 03/10/30: Damping is adjusted for large mueff always. + 04/04/22: Cumulation time and damping for step size adjusted. + No iniphase but conditional update of pc. + 05/03/15: in ccov-setting mucov replaced by mueff. + 05/10/05: revise comment on resampling in example.c + 05/10/13: output of "coorstddev" changed from sigma * C[i][i] + to correct sigma * sqrt(C[i][i]). + 05/11/09: Numerical problems are not anymore handled by increasing + sigma, but lead to satisfy a stopping criterion in + cmaes_Test(). + 05/11/09: Update of eigensystem and test for numerical problems + moved right before sampling. + 06/02/24: Non-ansi array definitions replaced (thanks to Marc + Toussaint). + 06/02/25: Overflow in time measurement for runs longer than + 2100 seconds. This could lead to stalling the + covariance matrix update for long periods. + Time measurement completely rewritten. 
+ 06/02/26: Included population size lambda as parameter to + cmaes_init (thanks to MT). + 06/02/26: Allow no initial reading/writing of parameters via + "non" and "writeonly" keywords for input parameter + filename in cmaes_init. + 06/02/27: Optimized code regarding time spent in updating the + covariance matrix in function Adapt_C2(). + 07/08/03: clean up and implementation of an exhaustive test + of the eigendecomposition (via #ifdef for now) + 07/08/04: writing of output improved + 07/08/xx: termination criteria revised and more added, + damp replaced by damps=damp*cs, documentation improved. + Interface significantly changed, evaluateSample function + and therefore the function pointer argument removed. + Renaming of functions in accordance with Java code. + Clean up of parameter names, mainly in accordance with + Matlab conventions. Most termination criteria can be + changed online now. Many more small changes, but not in + the core procedure. + 07/10/29: ReSampleSingle() got a better interface. ReSampleSingle() + is now ReSampleSingle_old only for backward + compatibility. Also fixed incorrect documentation. The new + function SampleSingleInto() has an interface similar to + the old ReSampleSingle(), but is not really necessary. + 07/11/20: bug: stopMaxIter did not translate into the correct default + value but into -1 as default. This lead to a too large + damps and the termination test became true from the first + iteration. (Thanks to Michael Calonder) + 07/11/20: new default stopTolFunHist = 1e-13; (instead of zero) + 08/09/26: initial diagonal covariance matrix in code, but not + yet in interface + 08/09/27: diagonalCovarianceMatrix option in initials.par provided + 08/10/17: uncertainty handling implemented in example3.c. + PerturbSolutionInto() provides the optional small + perturbations before reevaluation. + 10/10/16: TestForTermination changed such that diagonalCovarianceMatrix + option now yields linear time behavior + 12/05/28: random seed > 2e9 prohibited to avoid an infinite loop on 32bit systems + 12/10/21: input parameter file values "no", "none" now work as "non". + 12/10/xx: tentative implementation of cmaes_Optimize + 12/10/xx: some small changes with char * mainly to prevent warnings in C++ + 12/10/xx: added some string convenience functions isNoneStr, new_string, assign_string + 13/01/03: rename files example?, initials.par, signals.par + 14/04/29: removed bug, au = t->al[...], from the (new) boundary handling + code (thanks to Emmanuel Benazera for the hint) + + Wish List + o make signals_filename part of cmaes_t using assign_string() + + o as writing time is measure for all files at once, the display + cannot be independently written to a file via signals.par, while + this would be desirable. + + o clean up sorting of eigenvalues and vectors which is done repeatedly. + + o either use cmaes_Get() in cmaes_WriteToFilePtr(): revise the + cmaes_write that all keywords available with get and getptr are + recognized. Also revise the keywords, keeping backward + compatibility. (not only) for this it would be useful to find a + way how cmaes_Get() signals an unrecognized keyword. For GetPtr + it can return NULL. + + o or break cmaes_Get() into single getter functions, being a nicer + interface, and compile instead of runtime error, and faster. For + file signals.par it does not help. + + o writing data depending on timing in a smarter way, e.g. using 10% + of all time. 
First find out whether clock() is useful for measuring + disc writing time and then timings_t class can be utilized. + For very large dimension the default of 1 seconds waiting might + be too small. + + o allow modification of best solution depending on delivered f(xmean) + + o re-write input and output procedures +*/ + +#include /* sqrt() */ +#include /* size_t */ +#include /* NULL, free */ +#include /* strlen() */ +#include /* sprintf(), NULL? */ +#include +#include "cmaes.h" +#include "cmaes_interface.h" /* via cmaes.h */ + +/* --------------------------------------------------------- */ +/* ------------------- Declarations ------------------------ */ +/* --------------------------------------------------------- */ + +/* ------------------- External Visibly -------------------- */ + +/* see cmaes_interface.h for those, not listed here */ + +long random_init(random_t *, long unsigned seed /* 0==clock */); +void random_exit(random_t *); +double random_Gauss(random_t *); /* (0,1)-normally distributed */ +double random_Uniform(random_t *); +long random_Start(random_t *, long unsigned seed /* 0==1 */); + +void timings_init(timings_t *timing); +void timings_start(timings_t *timing); /* fields totaltime and tictoctime */ +double timings_update(timings_t *timing); +void timings_tic(timings_t *timing); +double timings_toc(timings_t *timing); + +void readpara_init (readpara_t *, int dim, int seed, const double * xstart, + const double * sigma, int lambda, const char * filename); +void readpara_exit(readpara_t *); +void readpara_ReadFromFile(readpara_t *, const char *szFileName); +void readpara_SupplementDefaults(readpara_t *); +void readpara_SetWeights(readpara_t *, const char * mode); +void readpara_WriteToFile(readpara_t *, const char *filenamedest); + +const double * cmaes_Optimize( cmaes_t *, double(*pFun)(double const *, int dim), + long iterations); +double const * cmaes_SetMean(cmaes_t *, const double *xmean); +double * cmaes_PerturbSolutionInto(cmaes_t *t, double *xout, + double const *xin, double eps); +void cmaes_WriteToFile(cmaes_t *, const char *key, const char *name); +void cmaes_WriteToFileAW(cmaes_t *t, const char *key, const char *name, + const char * append); +void cmaes_WriteToFilePtr(cmaes_t *, const char *key, FILE *fp); +void cmaes_ReadFromFilePtr(cmaes_t *, FILE *fp); +void cmaes_FATAL(char const *s1, char const *s2, + char const *s3, char const *s4); + + +/* ------------------- Locally visibly ----------------------- */ + +static char * getTimeStr(void); +static void TestMinStdDevs( cmaes_t *); +/* static void WriteMaxErrorInfo( cmaes_t *); */ + +static void Eigen( int N, double **C, double *diag, double **Q, + double *rgtmp); +static int Check_Eigen( int N, double **C, double *diag, double **Q); +static void QLalgo2 (int n, double *d, double *e, double **V); +static void Householder2(int n, double **V, double *d, double *e); +static void Adapt_C2(cmaes_t *t, int hsig); + +static void FATAL(char const *sz1, char const *s2, + char const *s3, char const *s4); +static void ERRORMESSAGE(char const *sz1, char const *s2, + char const *s3, char const *s4); +static int isNoneStr(const char * filename); +static void Sorted_index( const double *rgFunVal, int *index, int n); +static int SignOfDiff( const void *d1, const void * d2); +static double douSquare(double); +static double rgdouMax( const double *rgd, int len); +static double rgdouMin( const double *rgd, int len); +static double douMax( double d1, double d2); +static double douMin( double d1, double d2); +static int intMin( int 
i, int j); +static int MaxIdx( const double *rgd, int len); +static int MinIdx( const double *rgd, int len); +static double myhypot(double a, double b); +static double * new_double( int n); +static void * new_void( int n, size_t size); +static char * new_string( const char *); +static void assign_string( char **, const char*); + +/* --------------------------------------------------------- */ +/* ---------------- Functions: cmaes_t --------------------- */ +/* --------------------------------------------------------- */ + +static char * +getTimeStr(void) { + time_t tm = time(NULL); + static char s[33]; + + /* get time */ + strncpy(s, ctime(&tm), 24); /* TODO: hopefully we read something useful */ + s[24] = '\0'; /* cut the \n */ + return s; +} + +char * +cmaes_SayHello(cmaes_t *t) { + /* write initial message */ + sprintf(t->sOutString, + "(%d,%d)-CMA-ES(mu_eff=%.1f), Ver=\"%s\", dimension=%d, diagonalIterations=%ld, randomSeed=%d (%s)", + t->sp.mu, t->sp.lambda, t->sp.mueff, t->version, t->sp.N, (long)t->sp.diagonalCov, + t->sp.seed, getTimeStr()); + + return t->sOutString; +} + +double * +cmaes_init(cmaes_t *t, /* "this" */ + int dimension, + double *inxstart, + double *inrgstddev, /* initial stds */ + long int inseed, + int lambda, + const char *input_parameter_filename) { + int i, j, N; + double dtest, trace; + static const char * version = "3.11.02.beta"; + + if (t->version && strcmp(version, t->version) == 0) { + ERRORMESSAGE("cmaes_init called twice, which will lead to a memory leak, use cmaes_exit first",0,0,0); + printf("Warning: cmaes_init called twice, which will lead to a memory leak, use cmaes_exit first\n"); + } + t->version = version; + /* assign_string(&t->signalsFilename, "cmaes_signals.par"); */ + + readpara_init (&t->sp, dimension, inseed, inxstart, inrgstddev, + lambda, input_parameter_filename); + t->sp.seed = random_init( &t->rand, (long unsigned int) t->sp.seed); + + N = t->sp.N; /* for convenience */ + + /* initialization */ + for (i = 0, trace = 0.; i < N; ++i) + trace += t->sp.rgInitialStds[i]*t->sp.rgInitialStds[i]; + t->sigma = sqrt(trace/N); /* t->sp.mueff/(0.2*t->sp.mueff+sqrt(N)) * sqrt(trace/N); */ + + t->chiN = sqrt((double) N) * (1. - 1./(4.*N) + 1./(21.*N*N)); + t->flgEigensysIsUptodate = 1; + t->flgCheckEigen = 0; + t->genOfEigensysUpdate = 0; + timings_init(&t->eigenTimings); + t->flgIniphase = 0; /* do not use iniphase, hsig does the job now */ + t->flgresumedone = 0; + t->flgStop = 0; + + for (dtest = 1.; dtest && dtest < 1.1 * dtest; dtest *= 2.) + if (dtest == dtest + 1.) 
+ break; + t->dMaxSignifKond = dtest / 1000.; /* not sure whether this is really save, 100 does not work well enough */ + + t->gen = 0; + t->countevals = 0; + t->state = 0; + t->dLastMinEWgroesserNull = 1.0; + t->printtime = t->writetime = t->firstwritetime = t->firstprinttime = 0; + + t->rgpc = new_double(N); + t->rgps = new_double(N); + t->rgdTmp = new_double(N+1); + t->rgBDz = new_double(N); + t->rgxmean = new_double(N+2); + t->rgxmean[0] = N; + ++t->rgxmean; + t->rgxold = new_double(N+2); + t->rgxold[0] = N; + ++t->rgxold; + t->rgxbestever = new_double(N+3); + t->rgxbestever[0] = N; + ++t->rgxbestever; + t->rgout = new_double(N+2); + t->rgout[0] = N; + ++t->rgout; + t->rgD = new_double(N); + t->C = (double**)new_void(N, sizeof(double*)); + t->B = (double**)new_void(N, sizeof(double*)); + t->publicFitness = new_double(t->sp.lambda); + t->rgFuncValue = new_double(t->sp.lambda+1); + t->rgFuncValue[0]=t->sp.lambda; + ++t->rgFuncValue; + t->arFuncValueHist = new_double(10+(int)ceil(3.*10.*N/t->sp.lambda)+1); + t->arFuncValueHist[0] = (double)(10+(int)ceil(3.*10.*N/t->sp.lambda)); + t->arFuncValueHist++; + + for (i = 0; i < N; ++i) { + t->C[i] = new_double(i+1); + t->B[i] = new_double(N); + } + t->index = (int *) new_void(t->sp.lambda, sizeof(int)); + for (i = 0; i < t->sp.lambda; ++i) + t->index[i] = i; /* should not be necessary */ + t->rgrgx = (double **)new_void(t->sp.lambda, sizeof(double*)); + for (i = 0; i < t->sp.lambda; ++i) { + t->rgrgx[i] = new_double(N+2); + t->rgrgx[i][0] = N; + t->rgrgx[i]++; + } + + /* Initialize newed space */ + + for (i = 0; i < N; ++i) + for (j = 0; j < i; ++j) + t->C[i][j] = t->B[i][j] = t->B[j][i] = 0.; + + for (i = 0; i < N; ++i) { + t->B[i][i] = 1.; + t->C[i][i] = t->rgD[i] = t->sp.rgInitialStds[i] * sqrt(N / trace); + t->C[i][i] *= t->C[i][i]; + t->rgpc[i] = t->rgps[i] = 0.; + } + + t->minEW = rgdouMin(t->rgD, N); + t->minEW = t->minEW * t->minEW; + t->maxEW = rgdouMax(t->rgD, N); + t->maxEW = t->maxEW * t->maxEW; + + t->maxdiagC=t->C[0][0]; + for(i=1; imaxdiagCC[i][i]) t->maxdiagC=t->C[i][i]; + t->mindiagC=t->C[0][0]; + for(i=1; imindiagC>t->C[i][i]) t->mindiagC=t->C[i][i]; + + /* set xmean */ + for (i = 0; i < N; ++i) + t->rgxmean[i] = t->rgxold[i] = t->sp.xstart[i]; + /* use in case xstart as typicalX */ + if (t->sp.typicalXcase) + for (i = 0; i < N; ++i) + t->rgxmean[i] += t->sigma * t->rgD[i] * random_Gauss(&t->rand); + + if (strcmp(t->sp.resumefile, "_no_") != 0) + cmaes_resume_distribution(t, t->sp.resumefile); + + return (t->publicFitness); + +} /* cmaes_init() */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ + +void +cmaes_resume_distribution(cmaes_t *t, char *filename) { + int i, j, res, n; + double d; + FILE *fp = fopen( filename, "r"); + if(fp == NULL) { + ERRORMESSAGE("cmaes_resume_distribution(): could not open '", + filename, "'",0); + return; + } + /* count number of "resume" entries */ + i = 0; + res = 0; + while (1) { + if ((res = fscanf(fp, " resume %lg", &d)) == EOF) + break; + else if (res==0) + fscanf(fp, " %*s"); + else if(res > 0) + i += 1; + } + + /* go to last "resume" entry */ + n = i; + i = 0; + res = 0; + rewind(fp); + while (i 0) + ++i; + } + if (d != t->sp.N) + FATAL("cmaes_resume_distribution(): Dimension numbers do not match",0,0,0); + + /* find next "xmean" entry */ + while (1) { + if ((res = fscanf(fp, " xmean %lg", &d)) == EOF) + FATAL("cmaes_resume_distribution(): 'xmean' not found",0,0,0); + else if (res==0) + fscanf(fp, " %*s"); 
+ else if(res > 0) + break; + } + + /* read xmean */ + t->rgxmean[0] = d; + res = 1; + for(i = 1; i < t->sp.N; ++i) + res += fscanf(fp, " %lg", &t->rgxmean[i]); + if (res != t->sp.N) + FATAL("cmaes_resume_distribution(): xmean: dimensions differ",0,0,0); + + /* find next "path for sigma" entry */ + while (1) { + if ((res = fscanf(fp, " path for sigma %lg", &d)) == EOF) + FATAL("cmaes_resume_distribution(): 'path for sigma' not found",0,0,0); + else if (res==0) + fscanf(fp, " %*s"); + else if(res > 0) + break; + } + + /* read ps */ + t->rgps[0] = d; + res = 1; + for(i = 1; i < t->sp.N; ++i) + res += fscanf(fp, " %lg", &t->rgps[i]); + if (res != t->sp.N) + FATAL("cmaes_resume_distribution(): ps: dimensions differ",0,0,0); + + /* find next "path for C" entry */ + while (1) { + if ((res = fscanf(fp, " path for C %lg", &d)) == EOF) + FATAL("cmaes_resume_distribution(): 'path for C' not found",0,0,0); + else if (res==0) + fscanf(fp, " %*s"); + else if(res > 0) + break; + } + /* read pc */ + t->rgpc[0] = d; + res = 1; + for(i = 1; i < t->sp.N; ++i) + res += fscanf(fp, " %lg", &t->rgpc[i]); + if (res != t->sp.N) + FATAL("cmaes_resume_distribution(): pc: dimensions differ",0,0,0); + + /* find next "sigma" entry */ + while (1) { + if ((res = fscanf(fp, " sigma %lg", &d)) == EOF) + FATAL("cmaes_resume_distribution(): 'sigma' not found",0,0,0); + else if (res==0) + fscanf(fp, " %*s"); + else if(res > 0) + break; + } + t->sigma = d; + + /* find next entry "covariance matrix" */ + while (1) { + if ((res = fscanf(fp, " covariance matrix %lg", &d)) == EOF) + FATAL("cmaes_resume_distribution(): 'covariance matrix' not found",0,0,0); + else if (res==0) + fscanf(fp, " %*s"); + else if(res > 0) + break; + } + /* read C */ + t->C[0][0] = d; + res = 1; + for (i = 1; i < t->sp.N; ++i) + for (j = 0; j <= i; ++j) + res += fscanf(fp, " %lg", &t->C[i][j]); + if (res != (t->sp.N*t->sp.N+t->sp.N)/2) + FATAL("cmaes_resume_distribution(): C: dimensions differ",0,0,0); + + t->flgIniphase = 0; + t->flgEigensysIsUptodate = 0; + t->flgresumedone = 1; + cmaes_UpdateEigensystem(t, 1); + +} /* cmaes_resume_distribution() */ +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ + +void +cmaes_exit(cmaes_t *t) { + int i, N = t->sp.N; + t->version = NULL; + /* free(t->signals_filename) */ + t->state = -1; /* not really useful at the moment */ + free( t->rgpc); + free( t->rgps); + free( t->rgdTmp); + free( t->rgBDz); + free( --t->rgxmean); + free( --t->rgxold); + free( --t->rgxbestever); + free( --t->rgout); + free( t->rgD); + for (i = 0; i < N; ++i) { + free( t->C[i]); + free( t->B[i]); + } + for (i = 0; i < t->sp.lambda; ++i) + free( --t->rgrgx[i]); + free( t->rgrgx); + free( t->C); + free( t->B); + free( t->index); + free( t->publicFitness); + free( --t->rgFuncValue); + free( --t->arFuncValueHist); + random_exit (&t->rand); + readpara_exit (&t->sp); +} /* cmaes_exit() */ + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double const * +cmaes_SetMean(cmaes_t *t, const double *xmean) +/* + * Distribution mean could be changed before SamplePopulation(). + * This might lead to unexpected behaviour if done repeatedly. 
+ */ +{ + int i, N=t->sp.N; + + if (t->state >= 1 && t->state < 3) + FATAL("cmaes_SetMean: mean cannot be set inbetween the calls of ", + "SamplePopulation and UpdateDistribution",0,0); + + if (xmean != NULL && xmean != t->rgxmean) + for(i = 0; i < N; ++i) + t->rgxmean[i] = xmean[i]; + else + xmean = t->rgxmean; + + return xmean; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double * const * +cmaes_SamplePopulation(cmaes_t *t) { + int iNk, i, j, N=t->sp.N; + int flgdiag = ((t->sp.diagonalCov == 1) || (t->sp.diagonalCov >= t->gen)); + double sum; + double const *xmean = t->rgxmean; + + /* cmaes_SetMean(t, xmean); * xmean could be changed at this point */ + + /* calculate eigensystem */ + if (!t->flgEigensysIsUptodate) { + if (!flgdiag) + cmaes_UpdateEigensystem(t, 0); + else { + for (i = 0; i < N; ++i) + t->rgD[i] = sqrt(t->C[i][i]); + t->minEW = douSquare(rgdouMin(t->rgD, N)); + t->maxEW = douSquare(rgdouMax(t->rgD, N)); + t->flgEigensysIsUptodate = 1; + timings_start(&t->eigenTimings); + } + } + + /* treat minimal standard deviations and numeric problems */ + TestMinStdDevs(t); + + for (iNk = 0; iNk < t->sp.lambda; ++iNk) { + /* generate scaled random vector (D * z) */ + for (i = 0; i < N; ++i) + if (flgdiag) + t->rgrgx[iNk][i] = xmean[i] + t->sigma * t->rgD[i] * random_Gauss(&t->rand); + else + t->rgdTmp[i] = t->rgD[i] * random_Gauss(&t->rand); + if (!flgdiag) + /* add mutation (sigma * B * (D*z)) */ + for (i = 0; i < N; ++i) { + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + t->rgrgx[iNk][i] = xmean[i] + t->sigma * sum; + } + } + if(t->state == 3 || t->gen == 0) + ++t->gen; + t->state = 1; + + return(t->rgrgx); +} /* SamplePopulation() */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double const * +cmaes_ReSampleSingle_old( cmaes_t *t, double *rgx) { + int i, j, N=t->sp.N; + double sum; + + if (rgx == NULL) + FATAL("cmaes_ReSampleSingle(): Missing input double *x",0,0,0); + + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i] * random_Gauss(&t->rand); + /* add mutation (sigma * B * (D*z)) */ + for (i = 0; i < N; ++i) { + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + rgx[i] = t->rgxmean[i] + t->sigma * sum; + } + return rgx; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double * const * +cmaes_ReSampleSingle( cmaes_t *t, int iindex) { + int i, j, N=t->sp.N; + double *rgx; + double sum; + static char s[99]; + + if (iindex < 0 || iindex >= t->sp.lambda) { + sprintf(s, "index==%d must be between 0 and %d", iindex, t->sp.lambda); + FATAL("cmaes_ReSampleSingle(): Population member ",s,0,0); + } + rgx = t->rgrgx[iindex]; + + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i] * random_Gauss(&t->rand); + /* add mutation (sigma * B * (D*z)) */ + for (i = 0; i < N; ++i) { + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + rgx[i] = t->rgxmean[i] + t->sigma * sum; + } + return(t->rgrgx); +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double * +cmaes_SampleSingleInto( cmaes_t *t, double *rgx) { + int i, j, N=t->sp.N; + double sum; + + if (rgx == NULL) + rgx = new_double(N); + + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i] * random_Gauss(&t->rand); + /* add mutation (sigma 
* B * (D*z)) */ + for (i = 0; i < N; ++i) { + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + rgx[i] = t->rgxmean[i] + t->sigma * sum; + } + return rgx; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double * +cmaes_PerturbSolutionInto( cmaes_t *t, double *rgx, double const *xmean, double eps) { + int i, j, N=t->sp.N; + double sum; + + if (rgx == NULL) + rgx = new_double(N); + if (xmean == NULL) + FATAL("cmaes_PerturbSolutionInto(): xmean was not given",0,0,0); + + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i] * random_Gauss(&t->rand); + /* add mutation (sigma * B * (D*z)) */ + for (i = 0; i < N; ++i) { + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + rgx[i] = xmean[i] + eps * t->sigma * sum; + } + return rgx; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +const double * +cmaes_Optimize( cmaes_t *evo, double(*pFun)(double const *, int dim), long iterations) +/* TODO: make signals.par another argument or, even better, part of cmaes_t */ +{ + const char * signalsFilename = "cmaes_signals.par"; + double *const*pop; /* sampled population */ + const char *stop; + int i; + long startiter = evo->gen; + + while(!(stop=cmaes_TestForTermination(evo)) && + (evo->gen < startiter + iterations || !iterations)) { + /* Generate population of new candidate solutions */ + pop = cmaes_SamplePopulation(evo); /* do not change content of pop */ + + /* Compute fitness value for each candidate solution */ + for (i = 0; i < cmaes_Get(evo, "popsize"); ++i) { + evo->publicFitness[i] = (*pFun)(pop[i], evo->sp.N); + } + + /* update search distribution */ + cmaes_UpdateDistribution(evo, evo->publicFitness); + + /* read control signals for output and termination */ + if (signalsFilename) + cmaes_ReadSignals(evo, signalsFilename); + fflush(stdout); + } /* while !cmaes_TestForTermination(evo) */ + + /* write some data */ + cmaes_WriteToFile(evo, "all", "allcmaes.dat"); + + return cmaes_GetPtr(evo, "xbestever"); +} + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +double * +cmaes_UpdateDistribution( cmaes_t *t, const double *rgFunVal) { + int i, j, iNk, hsig, N=t->sp.N; + int flgdiag = ((t->sp.diagonalCov == 1) || (t->sp.diagonalCov >= t->gen)); + double sum; + double psxps; + + if(t->state == 3) + FATAL("cmaes_UpdateDistribution(): You need to call \n", + "SamplePopulation() before update can take place.",0,0); + if(rgFunVal == NULL) + FATAL("cmaes_UpdateDistribution(): ", + "Fitness function value array input is missing.",0,0); + + if(t->state == 1) /* function values are delivered here */ + t->countevals += t->sp.lambda; + else + ERRORMESSAGE("cmaes_UpdateDistribution(): unexpected state",0,0,0); + + /* assign function values */ + for (i=0; i < t->sp.lambda; ++i) + t->rgrgx[i][N] = t->rgFuncValue[i] = rgFunVal[i]; + + + /* Generate index */ + Sorted_index(rgFunVal, t->index, t->sp.lambda); + + /* Test if function values are identical, escape flat fitness */ + if (t->rgFuncValue[t->index[0]] == + t->rgFuncValue[t->index[(int)t->sp.lambda/2]]) { + t->sigma *= exp(0.2+t->sp.cs/t->sp.damps); + ERRORMESSAGE("Warning: sigma increased due to equal function values\n", + " Reconsider the formulation of the objective function",0,0); + } + + /* update function value history */ + for(i = (int)*(t->arFuncValueHist-1)-1; i 
> 0; --i) /* for(i = t->arFuncValueHist[-1]-1; i > 0; --i) */ + t->arFuncValueHist[i] = t->arFuncValueHist[i-1]; + t->arFuncValueHist[0] = rgFunVal[t->index[0]]; + + /* update xbestever */ + if (t->rgxbestever[N] > t->rgrgx[t->index[0]][N] || t->gen == 1) + for (i = 0; i <= N; ++i) { + t->rgxbestever[i] = t->rgrgx[t->index[0]][i]; + t->rgxbestever[N+1] = t->countevals; + } + + /* calculate xmean and rgBDz~N(0,C) */ + for (i = 0; i < N; ++i) { + t->rgxold[i] = t->rgxmean[i]; + t->rgxmean[i] = 0.; + for (iNk = 0; iNk < t->sp.mu; ++iNk) + t->rgxmean[i] += t->sp.weights[iNk] * t->rgrgx[t->index[iNk]][i]; + t->rgBDz[i] = sqrt(t->sp.mueff)*(t->rgxmean[i] - t->rgxold[i])/t->sigma; + } + + /* calculate z := D^(-1) * B^(-1) * rgBDz into rgdTmp */ + for (i = 0; i < N; ++i) { + if (!flgdiag) + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[j][i] * t->rgBDz[j]; + else + sum = t->rgBDz[i]; + t->rgdTmp[i] = sum / t->rgD[i]; + } + + /* TODO?: check length of t->rgdTmp and set an upper limit, e.g. 6 stds */ + /* in case of manipulation of arx, + this can prevent an increase of sigma by several orders of magnitude + within one step; a five-fold increase in one step can still happen. + */ + /* + for (j = 0, sum = 0.; j < N; ++j) + sum += t->rgdTmp[j] * t->rgdTmp[j]; + if (sqrt(sum) > chiN + 6. * sqrt(0.5)) { + rgdTmp length should be set to upper bound and hsig should become zero + } + */ + + /* cumulation for sigma (ps) using B*z */ + for (i = 0; i < N; ++i) { + if (!flgdiag) + for (j = 0, sum = 0.; j < N; ++j) + sum += t->B[i][j] * t->rgdTmp[j]; + else + sum = t->rgdTmp[i]; + t->rgps[i] = (1. - t->sp.cs) * t->rgps[i] + + sqrt(t->sp.cs * (2. - t->sp.cs)) * sum; + } + + /* calculate norm(ps)^2 */ + for (i = 0, psxps = 0.; i < N; ++i) + psxps += t->rgps[i] * t->rgps[i]; + + /* cumulation for covariance matrix (pc) using B*D*z~N(0,C) */ + hsig = sqrt(psxps) / sqrt(1. - pow(1.-t->sp.cs, 2*t->gen)) / t->chiN + < 1.4 + 2./(N+1); + for (i = 0; i < N; ++i) { + t->rgpc[i] = (1. - t->sp.ccumcov) * t->rgpc[i] + + hsig * sqrt(t->sp.ccumcov * (2. - t->sp.ccumcov)) * t->rgBDz[i]; + } + + /* stop initial phase */ + if (t->flgIniphase && + t->gen > douMin(1/t->sp.cs, 1+N/t->sp.mucov)) { + if (psxps / t->sp.damps / (1.-pow((1. - t->sp.cs), t->gen)) + < N * 1.05) + t->flgIniphase = 0; + } + +#if 0 + /* remove momentum in ps, if ps is large and fitness is getting worse */ + /* This is obsolete due to hsig and harmful in a dynamic environment */ + if(psxps/N > 1.5 + 10.*sqrt(2./N) + && t->arFuncValueHist[0] > t->arFuncValueHist[1] + && t->arFuncValueHist[0] > t->arFuncValueHist[2]) { + double tfac = sqrt((1 + douMax(0, log(psxps/N))) * N / psxps); + for (i=0; irgps[i] *= tfac; + psxps *= tfac*tfac; + } +#endif + + /* update of C */ + + Adapt_C2(t, hsig); + + /* Adapt_C(t); not used anymore */ + +#if 0 + if (t->sp.ccov != 0. 
&& t->flgIniphase == 0) { + int k; + + t->flgEigensysIsUptodate = 0; + + /* update covariance matrix */ + for (i = 0; i < N; ++i) + for (j = 0; j <=i; ++j) { + t->C[i][j] = (1 - t->sp.ccov) * t->C[i][j] + + t->sp.ccov * (1./t->sp.mucov) + * (t->rgpc[i] * t->rgpc[j] + + (1-hsig)*t->sp.ccumcov*(2.-t->sp.ccumcov) * t->C[i][j]); + for (k = 0; k < t->sp.mu; ++k) /* additional rank mu update */ + t->C[i][j] += t->sp.ccov * (1-1./t->sp.mucov) * t->sp.weights[k] + * (t->rgrgx[t->index[k]][i] - t->rgxold[i]) + * (t->rgrgx[t->index[k]][j] - t->rgxold[j]) + / t->sigma / t->sigma; + } + } +#endif + + + /* update of sigma */ + t->sigma *= exp(((sqrt(psxps)/t->chiN)-1.)*t->sp.cs/t->sp.damps); + + t->state = 3; + + return (t->rgxmean); + +} /* cmaes_UpdateDistribution() */ + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +static void +Adapt_C2(cmaes_t *t, int hsig) { + int i, j, k, N=t->sp.N; + int flgdiag = ((t->sp.diagonalCov == 1) || (t->sp.diagonalCov >= t->gen)); + + if (t->sp.ccov != 0. && t->flgIniphase == 0) { + + /* definitions for speeding up inner-most loop */ + double ccov1 = douMin(t->sp.ccov * (1./t->sp.mucov) * (flgdiag ? (N+1.5) / 3. : 1.), 1.); + double ccovmu = douMin(t->sp.ccov * (1-1./t->sp.mucov)* (flgdiag ? (N+1.5) / 3. : 1.), 1.-ccov1); + double sigmasquare = t->sigma * t->sigma; + + t->flgEigensysIsUptodate = 0; + + /* update covariance matrix */ + for (i = 0; i < N; ++i) + for (j = flgdiag ? i : 0; j <= i; ++j) { + t->C[i][j] = (1 - ccov1 - ccovmu) * t->C[i][j] + + ccov1 + * (t->rgpc[i] * t->rgpc[j] + + (1-hsig)*t->sp.ccumcov*(2.-t->sp.ccumcov) * t->C[i][j]); + for (k = 0; k < t->sp.mu; ++k) { /* additional rank mu update */ + t->C[i][j] += ccovmu * t->sp.weights[k] + * (t->rgrgx[t->index[k]][i] - t->rgxold[i]) + * (t->rgrgx[t->index[k]][j] - t->rgxold[j]) + / sigmasquare; + } + } + /* update maximal and minimal diagonal value */ + t->maxdiagC = t->mindiagC = t->C[0][0]; + for (i = 1; i < N; ++i) { + if (t->maxdiagC < t->C[i][i]) + t->maxdiagC = t->C[i][i]; + else if (t->mindiagC > t->C[i][i]) + t->mindiagC = t->C[i][i]; + } + } /* if ccov... 
*/ +} + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +static void +TestMinStdDevs(cmaes_t *t) +/* increases sigma */ +{ + int i, N = t->sp.N; + if (t->sp.rgDiffMinChange == NULL) + return; + + for (i = 0; i < N; ++i) + while (t->sigma * sqrt(t->C[i][i]) < t->sp.rgDiffMinChange[i]) + t->sigma *= exp(0.05+t->sp.cs/t->sp.damps); + +} /* cmaes_TestMinStdDevs() */ + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void cmaes_WriteToFile(cmaes_t *t, const char *key, const char *name) { + cmaes_WriteToFileAW(t, key, name, "a"); /* default is append */ +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void cmaes_WriteToFileAW(cmaes_t *t, const char *key, const char *name, + const char *appendwrite) { + const char *s = "tmpcmaes.dat"; + FILE *fp; + + if (name == NULL) + name = s; + + fp = fopen( name, appendwrite); + + if(fp == NULL) { + ERRORMESSAGE("cmaes_WriteToFile(): could not open '", name, + "' with flag ", appendwrite); + return; + } + + if (appendwrite[0] == 'w') { + /* write a header line, very rudimentary */ + fprintf(fp, "%% # %s (randomSeed=%d, %s)\n", key, t->sp.seed, getTimeStr()); + } else if (t->gen > 0 || strncmp(name, "outcmaesfit", 11) != 0) + cmaes_WriteToFilePtr(t, key, fp); /* do not write fitness for gen==0 */ + + fclose(fp); + +} /* WriteToFile */ + +/* --------------------------------------------------------- */ +void cmaes_WriteToFilePtr(cmaes_t *t, const char *key, FILE *fp) + +/* this hack reads key words from input key for data to be written to + * a file, see file signals.par as input file. The length of the keys + * is mostly fixed, see key += number in the code! If the key phrase + * does not match the expectation the output might be strange. for + * cmaes_t *t == NULL it solely prints key as a header line. Input key + * must be zero terminated. + */ +{ + int i, k, N=(t ? t->sp.N : 0); + char const *keyend; /* *keystart; */ + const char *s = "few"; + if (key == NULL) + key = s; + /* keystart = key; for debugging purpose */ + keyend = key + strlen(key); + + while (key < keyend) { + if (strncmp(key, "axisratio", 9) == 0) { + fprintf(fp, "%.2e", sqrt(t->maxEW/t->minEW)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "idxminSD", 8) == 0) { + int mini=0; + for(i=N-1; i>0; --i) if(t->mindiagC==t->C[i][i]) mini=i; + fprintf(fp, "%d", mini+1); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "idxmaxSD", 8) == 0) { + int maxi=0; + for(i=N-1; i>0; --i) if(t->maxdiagC==t->C[i][i]) maxi=i; + fprintf(fp, "%d", maxi+1); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? 
'\t':'\n'); + } + /* new coordinate system == all eigenvectors */ + if (strncmp(key, "B", 1) == 0) { + /* int j, index[N]; */ + int j, *iindex=(int*)(new_void(N,sizeof(int))); /* MT */ + Sorted_index(t->rgD, iindex, N); /* should not be necessary, see end of QLalgo2 */ + /* One eigenvector per row, sorted: largest eigenvalue first */ + for (i = 0; i < N; ++i) + for (j = 0; j < N; ++j) + fprintf(fp, "%g%c", t->B[j][iindex[N-1-i]], (j==N-1)?'\n':'\t'); + ++key; + free(iindex); /* MT */ + } + /* covariance matrix */ + if (strncmp(key, "C", 1) == 0) { + int j; + for (i = 0; i < N; ++i) + for (j = 0; j <= i; ++j) + fprintf(fp, "%g%c", t->C[i][j], (j==i)?'\n':'\t'); + ++key; + } + /* (processor) time (used) since begin of execution */ + if (strncmp(key, "clock", 4) == 0) { + timings_update(&t->eigenTimings); + fprintf(fp, "%.1f %.1f", t->eigenTimings.totaltotaltime, + t->eigenTimings.tictoctime); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + /* ratio between largest and smallest standard deviation */ + if (strncmp(key, "stddevratio", 11) == 0) { /* std dev in coordinate axes */ + fprintf(fp, "%g", sqrt(t->maxdiagC/t->mindiagC)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + /* standard deviations in coordinate directions (sigma*sqrt(C[i,i])) */ + if (strncmp(key, "coorstddev", 10) == 0 + || strncmp(key, "stddev", 6) == 0) { /* std dev in coordinate axes */ + for (i = 0; i < N; ++i) + fprintf(fp, "%s%g", (i==0) ? "":"\t", t->sigma*sqrt(t->C[i][i])); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + /* diagonal of D == roots of eigenvalues, sorted */ + if (strncmp(key, "diag(D)", 7) == 0) { + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i]; + qsort(t->rgdTmp, (unsigned) N, sizeof(double), &SignOfDiff); /* superfluous */ + for (i = 0; i < N; ++i) + fprintf(fp, "%s%g", (i==0) ? "":"\t", t->rgdTmp[i]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "dim", 3) == 0) { + fprintf(fp, "%d", N); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "eval", 4) == 0) { + fprintf(fp, "%.0f", t->countevals); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "few(diag(D))", 12) == 0) { /* between four and six axes */ + int add = (int)(0.5 + (N + 1.) / 5.); + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i]; + qsort(t->rgdTmp, (unsigned) N, sizeof(double), &SignOfDiff); + for (i = 0; i < N-1; i+=add) /* print always largest */ + fprintf(fp, "%s%g", (i==0) ? 
"":"\t", t->rgdTmp[N-1-i]); + fprintf(fp, "\t%g\n", t->rgdTmp[0]); /* and smallest */ + break; /* number of printed values is not determined */ + } + if (strncmp(key, "fewinfo", 7) == 0) { + fprintf(fp," Iter Fevals Function Value Sigma "); + fprintf(fp, "MaxCoorDev MinCoorDev AxisRatio MinDii Time in eig\n"); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + } + if (strncmp(key, "few", 3) == 0) { + fprintf(fp, " %4.0f ", t->gen); + fprintf(fp, " %5.0f ", t->countevals); + fprintf(fp, "%.15e", t->rgFuncValue[t->index[0]]); + fprintf(fp, " %.2e %.2e %.2e", t->sigma, t->sigma*sqrt(t->maxdiagC), + t->sigma*sqrt(t->mindiagC)); + fprintf(fp, " %.2e %.2e", sqrt(t->maxEW/t->minEW), sqrt(t->minEW)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "funval", 6) == 0 || strncmp(key, "fitness", 6) == 0) { + fprintf(fp, "%.15e", t->rgFuncValue[t->index[0]]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "fbestever", 9) == 0) { + fprintf(fp, "%.15e", t->rgxbestever[N]); /* f-value */ + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "fmedian", 7) == 0) { + fprintf(fp, "%.15e", t->rgFuncValue[t->index[(int)(t->sp.lambda/2)]]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "fworst", 6) == 0) { + fprintf(fp, "%.15e", t->rgFuncValue[t->index[t->sp.lambda-1]]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "arfunval", 8) == 0 || strncmp(key, "arfitness", 8) == 0) { + for (i = 0; i < N; ++i) + fprintf(fp, "%s%.10e", (i==0) ? "" : "\t", + t->rgFuncValue[t->index[i]]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "gen", 3) == 0) { + fprintf(fp, "%.0f", t->gen); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "iter", 4) == 0) { + fprintf(fp, "%.0f", t->gen); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "sigma", 5) == 0) { + fprintf(fp, "%.4e", t->sigma); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "minSD", 5) == 0) { /* minimal standard deviation */ + fprintf(fp, "%.4e", sqrt(t->mindiagC)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "maxSD", 5) == 0) { + fprintf(fp, "%.4e", sqrt(t->maxdiagC)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "mindii", 6) == 0) { + fprintf(fp, "%.4e", sqrt(t->minEW)); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "0", 1) == 0) { + fprintf(fp, "0"); + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "lambda", 6) == 0 || strncmp(key, "popsi", 5) == 0 || strncmp(key, "populationsi", 12) == 0) { + fprintf(fp, "%d", t->sp.lambda); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? 
'\t':'\n'); + } + if (strncmp(key, "N", 1) == 0) { + fprintf(fp, "%d", N); + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "resume", 6) == 0) { + fprintf(fp, "\n# resume %d\n", N); + fprintf(fp, "xmean\n"); + cmaes_WriteToFilePtr(t, "xmean", fp); + fprintf(fp, "path for sigma\n"); + for(i=0; irgps[i], (i==N-1) ? "\n":"\t"); + fprintf(fp, "path for C\n"); + for(i=0; irgpc[i], (i==N-1) ? "\n":"\t"); + fprintf(fp, "sigma %g\n", t->sigma); + /* note than B and D might not be up-to-date */ + fprintf(fp, "covariance matrix\n"); + cmaes_WriteToFilePtr(t, "C", fp); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + } + if (strncmp(key, "xbest", 5) == 0) { /* best x in recent generation */ + for(i=0; irgrgx[t->index[0]][i]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "xmean", 5) == 0) { + for(i=0; irgxmean[i]); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + fprintf(fp, "%c", (*key=='+') ? '\t':'\n'); + } + if (strncmp(key, "all", 3) == 0) { + time_t ti = time(NULL); + fprintf(fp, "\n# --------- %s\n", asctime(localtime(&ti))); + fprintf(fp, " N %d\n", N); + fprintf(fp, " seed %d\n", t->sp.seed); + fprintf(fp, "function evaluations %.0f\n", t->countevals); + fprintf(fp, "elapsed (CPU) time [s] %.2f\n", t->eigenTimings.totaltotaltime); + fprintf(fp, "function value f(x)=%g\n", t->rgrgx[t->index[0]][N]); + fprintf(fp, "maximal standard deviation %g\n", t->sigma*sqrt(t->maxdiagC)); + fprintf(fp, "minimal standard deviation %g\n", t->sigma*sqrt(t->mindiagC)); + fprintf(fp, "sigma %g\n", t->sigma); + fprintf(fp, "axisratio %g\n", rgdouMax(t->rgD, N)/rgdouMin(t->rgD, N)); + fprintf(fp, "xbestever found after %.0f evaluations, function value %g\n", + t->rgxbestever[N+1], t->rgxbestever[N]); + for(i=0; irgxbestever[i], + (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "xbest (of last generation, function value %g)\n", + t->rgrgx[t->index[0]][N]); + for(i=0; irgrgx[t->index[0]][i], + (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "xmean \n"); + for(i=0; irgxmean[i], + (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "Standard deviation of coordinate axes (sigma*sqrt(diag(C)))\n"); + for(i=0; isigma*sqrt(t->C[i][i]), + (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "Main axis lengths of mutation ellipsoid (sigma*diag(D))\n"); + for (i = 0; i < N; ++i) + t->rgdTmp[i] = t->rgD[i]; + qsort(t->rgdTmp, (unsigned) N, sizeof(double), &SignOfDiff); + for(i=0; isigma*t->rgdTmp[N-1-i], + (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "Longest axis (b_i where d_ii=max(diag(D))\n"); + k = MaxIdx(t->rgD, N); + for(i=0; iB[i][k], (i%5==4||i==N-1)?'\n':' '); + fprintf(fp, "Shortest axis (b_i where d_ii=max(diag(D))\n"); + k = MinIdx(t->rgD, N); + for(i=0; iB[i][k], (i%5==4||i==N-1)?'\n':' '); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + } /* "all" */ + +#if 0 /* could become generic part */ + s0 = key; + d = cmaes_Get(t, key); /* TODO find way to detect whether key was found */ + if (key == s0) { /* this does not work, is always true */ + /* write out stuff, problem: only generic format is available */ + /* move in key until "+" or end */ + } +#endif + + if (*key == '\0') + break; + else if (*key != '+') { /* last key was not recognized */ + ERRORMESSAGE("cmaes_t:WriteToFilePtr(): unrecognized key '", key, "'", 0); + while (*key != '+' && *key != '\0' && key < keyend) + ++key; + } + while (*key == '+') + ++key; + } /* while key < keyend */ + + if (key > keyend) + 
FATAL("cmaes_t:WriteToFilePtr(): BUG regarding key sequence",0,0,0); + +} /* WriteToFilePtr */ + +/* --------------------------------------------------------- */ +double +cmaes_Get( cmaes_t *t, char const *s) { + int N=t->sp.N; + + if (strncmp(s, "axisratio", 5) == 0) { /* between lengths of longest and shortest principal axis of the distribution ellipsoid */ + return (rgdouMax(t->rgD, N)/rgdouMin(t->rgD, N)); + } else if (strncmp(s, "eval", 4) == 0) { /* number of function evaluations */ + return (t->countevals); + } else if (strncmp(s, "fctvalue", 6) == 0 + || strncmp(s, "funcvalue", 6) == 0 + || strncmp(s, "funvalue", 6) == 0 + || strncmp(s, "fitness", 3) == 0) { /* recent best function value */ + return(t->rgFuncValue[t->index[0]]); + } else if (strncmp(s, "fbestever", 7) == 0) { /* ever best function value */ + return(t->rgxbestever[N]); + } else if (strncmp(s, "generation", 3) == 0 + || strncmp(s, "iteration", 4) == 0) { + return(t->gen); + } else if (strncmp(s, "maxeval", 4) == 0 + || strncmp(s, "MaxFunEvals", 8) == 0 + || strncmp(s, "stopMaxFunEvals", 12) == 0) { /* maximal number of function evaluations */ + return(t->sp.stopMaxFunEvals); + } else if (strncmp(s, "maxgen", 4) == 0 + || strncmp(s, "MaxIter", 7) == 0 + || strncmp(s, "stopMaxIter", 11) == 0) { /* maximal number of generations */ + return(ceil(t->sp.stopMaxIter)); + } else if (strncmp(s, "maxaxislength", 5) == 0) { /* sigma * max(diag(D)) */ + return(t->sigma * sqrt(t->maxEW)); + } else if (strncmp(s, "minaxislength", 5) == 0) { /* sigma * min(diag(D)) */ + return(t->sigma * sqrt(t->minEW)); + } else if (strncmp(s, "maxstddev", 4) == 0) { /* sigma * sqrt(max(diag(C))) */ + return(t->sigma * sqrt(t->maxdiagC)); + } else if (strncmp(s, "minstddev", 4) == 0) { /* sigma * sqrt(min(diag(C))) */ + return(t->sigma * sqrt(t->mindiagC)); + } else if (strncmp(s, "N", 1) == 0 || strcmp(s, "n") == 0 || + strncmp(s, "dimension", 3) == 0) { + return (N); + } else if (strncmp(s, "lambda", 3) == 0 + || strncmp(s, "samplesize", 8) == 0 + || strncmp(s, "popsize", 7) == 0) { /* sample size, offspring population size */ + return(t->sp.lambda); + } else if (strncmp(s, "sigma", 3) == 0) { + return(t->sigma); + } + FATAL( "cmaes_Get(cmaes_t, char const * s): No match found for s='", s, "'",0); + return(0); +} /* cmaes_Get() */ + +/* --------------------------------------------------------- */ +double * +cmaes_GetInto( cmaes_t *t, char const *s, double *res) { + int i, N = t->sp.N; + double const * res0 = cmaes_GetPtr(t, s); + if (res == NULL) + res = new_double(N); + for (i = 0; i < N; ++i) + res[i] = res0[i]; + return res; +} + +/* --------------------------------------------------------- */ +double * +cmaes_GetNew( cmaes_t *t, char const *s) { + return (cmaes_GetInto(t, s, NULL)); +} + +/* --------------------------------------------------------- */ +const double * +cmaes_GetPtr( cmaes_t *t, char const *s) { + int i, N=t->sp.N; + + /* diagonal of covariance matrix */ + if (strncmp(s, "diag(C)", 7) == 0) { + for (i = 0; i < N; ++i) + t->rgout[i] = t->C[i][i]; + return(t->rgout); + } + /* diagonal of axis lengths matrix */ + else if (strncmp(s, "diag(D)", 7) == 0) { + return(t->rgD); + } + /* vector of standard deviations sigma*sqrt(diag(C)) */ + else if (strncmp(s, "stddev", 3) == 0) { + for (i = 0; i < N; ++i) + t->rgout[i] = t->sigma * sqrt(t->C[i][i]); + return(t->rgout); + } + /* bestever solution seen so far */ + else if (strncmp(s, "xbestever", 7) == 0) + return(t->rgxbestever); + /* recent best solution of the recent population */ + 
else if (strncmp(s, "xbest", 5) == 0) + return(t->rgrgx[t->index[0]]); + /* mean of the recent distribution */ + else if (strncmp(s, "xmean", 1) == 0) + return(t->rgxmean); + + return(NULL); +} + +/* --------------------------------------------------------- */ +/* tests stopping criteria + * returns a string of satisfied stopping criterion for each line + * otherwise NULL +*/ +const char * +cmaes_TestForTermination( cmaes_t *t) { + double range, fac; + int iAchse, iKoo; + int flgdiag = ((t->sp.diagonalCov == 1) || (t->sp.diagonalCov >= t->gen)); + static char sTestOutString[3024]; + char * cp = sTestOutString; + int i, cTemp, N=t->sp.N; + cp[0] = '\0'; + + /* function value reached */ + if ((t->gen > 1 || t->state > 1) && t->sp.stStopFitness.flg && + t->rgFuncValue[t->index[0]] <= t->sp.stStopFitness.val) + cp += sprintf(cp, "Fitness: function value %7.2e <= stopFitness (%7.2e)\n", + t->rgFuncValue[t->index[0]], t->sp.stStopFitness.val); + + /* TolFun */ + range = douMax(rgdouMax(t->arFuncValueHist, (int)douMin(t->gen,*(t->arFuncValueHist-1))), + rgdouMax(t->rgFuncValue, t->sp.lambda)) - + douMin(rgdouMin(t->arFuncValueHist, (int)douMin(t->gen, *(t->arFuncValueHist-1))), + rgdouMin(t->rgFuncValue, t->sp.lambda)); + + if (t->gen > 0 && range <= t->sp.stopTolFun) { + cp += sprintf(cp, + "TolFun: function value differences %7.2e < stopTolFun=%7.2e\n", + range, t->sp.stopTolFun); + } + + /* TolFunHist */ + if (t->gen > *(t->arFuncValueHist-1)) { + range = rgdouMax(t->arFuncValueHist, (int)*(t->arFuncValueHist-1)) + - rgdouMin(t->arFuncValueHist, (int)*(t->arFuncValueHist-1)); + if (range <= t->sp.stopTolFunHist) + cp += sprintf(cp, + "TolFunHist: history of function value changes %7.2e stopTolFunHist=%7.2e", + range, t->sp.stopTolFunHist); + } + + /* TolX */ + for(i=0, cTemp=0; isigma * sqrt(t->C[i][i]) < t->sp.stopTolX) ? 1 : 0; + cTemp += (t->sigma * t->rgpc[i] < t->sp.stopTolX) ? 1 : 0; + } + if (cTemp == 2*N) { + cp += sprintf(cp, + "TolX: object variable changes below %7.2e \n", + t->sp.stopTolX); + } + + /* TolUpX */ + for(i=0; isigma * sqrt(t->C[i][i]) > t->sp.stopTolUpXFactor * t->sp.rgInitialStds[i]) + break; + } + if (i < N) { + cp += sprintf(cp, + "TolUpX: standard deviation increased by more than %7.2e, larger initial standard deviation recommended \n", + t->sp.stopTolUpXFactor); + } + + /* Condition of C greater than dMaxSignifKond */ + if (t->maxEW >= t->minEW * t->dMaxSignifKond) { + cp += sprintf(cp, + "ConditionNumber: maximal condition number %7.2e reached. maxEW=%7.2e,minEW=%7.2e,maxdiagC=%7.2e,mindiagC=%7.2e\n", + t->dMaxSignifKond, t->maxEW, t->minEW, t->maxdiagC, t->mindiagC); + } /* if */ + + /* Principal axis i has no effect on xmean, ie. 
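   (adding 0.1 standard deviations along that principal axis leaves every
   coordinate of xmean numerically unchanged in double precision, so that)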
+ x == x + 0.1 * sigma * rgD[i] * B[i] */ + if (!flgdiag) { + for (iAchse = 0; iAchse < N; ++iAchse) { + fac = 0.1 * t->sigma * t->rgD[iAchse]; + for (iKoo = 0; iKoo < N; ++iKoo) { + if (t->rgxmean[iKoo] != t->rgxmean[iKoo] + fac * t->B[iKoo][iAchse]) + break; + } + if (iKoo == N) { + /* t->sigma *= exp(0.2+t->sp.cs/t->sp.damps); */ + cp += sprintf(cp, + "NoEffectAxis: standard deviation 0.1*%7.2e in principal axis %d without effect\n", + fac/0.1, iAchse); + break; + } /* if (iKoo == N) */ + } /* for iAchse */ + } /* if flgdiag */ + /* Component of xmean is not changed anymore */ + for (iKoo = 0; iKoo < N; ++iKoo) { + if (t->rgxmean[iKoo] == t->rgxmean[iKoo] + + 0.2*t->sigma*sqrt(t->C[iKoo][iKoo])) { + /* t->C[iKoo][iKoo] *= (1 + t->sp.ccov); */ + /* flg = 1; */ + cp += sprintf(cp, + "NoEffectCoordinate: standard deviation 0.2*%7.2e in coordinate %d without effect\n", + t->sigma*sqrt(t->C[iKoo][iKoo]), iKoo); + break; + } + + } /* for iKoo */ + /* if (flg) t->sigma *= exp(0.05+t->sp.cs/t->sp.damps); */ + + if(t->countevals >= t->sp.stopMaxFunEvals) + cp += sprintf(cp, "MaxFunEvals: conducted function evaluations %.0f >= %g\n", + t->countevals, t->sp.stopMaxFunEvals); + if(t->gen >= t->sp.stopMaxIter) + cp += sprintf(cp, "MaxIter: number of iterations %.0f >= %g\n", + t->gen, t->sp.stopMaxIter); + if(t->flgStop) + cp += sprintf(cp, "Manual: stop signal read\n"); + +#if 0 + else if (0) { + for(i=0, cTemp=0; i320) + ERRORMESSAGE("Bug in cmaes_t:Test(): sTestOutString too short",0,0,0); + + if (cp != sTestOutString) { + return sTestOutString; + } + + return(NULL); + +} /* cmaes_Test() */ + +/* --------------------------------------------------------- */ +void cmaes_ReadSignals(cmaes_t *t, char const *filename) { + const char *s = "cmaes_signals.par"; + FILE *fp; + if (filename == NULL) + filename = s; + /* if (filename) assign_string(&(t->signalsFilename), filename)*/ + fp = fopen( filename, "r"); + if(fp == NULL) { + return; + } + cmaes_ReadFromFilePtr( t, fp); + fclose(fp); +} +/* --------------------------------------------------------- */ +void cmaes_ReadFromFilePtr( cmaes_t *t, FILE *fp) +/* reading commands e.g. from signals.par file +*/ +{ + const char *keys[15]; /* key strings for scanf */ + char s[199], sin1[99], sin2[129], sin3[99], sin4[99]; + int ikey, ckeys, nb; + double d; + static int flglockprint = 0; + static int flglockwrite = 0; + static long countiterlastwritten; + static long maxdiffitertowrite; /* to prevent long gaps at the beginning */ + int flgprinted = 0; + int flgwritten = 0; + double deltaprinttime = time(NULL)-t->printtime; /* using clock instead might not be a good */ + double deltawritetime = time(NULL)-t->writetime; /* idea as disc time is not CPU time? */ + double deltaprinttimefirst = t->firstprinttime ? time(NULL)-t->firstprinttime : 0; /* time is in seconds!? */ + double deltawritetimefirst = t->firstwritetime ? 
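  /* Example cmaes_signals.par content understood by this parser (values are
     illustrative; a line starting with '#' or '%' is skipped; the trailing
     number of a "print"/"write" command is the minimal gap in seconds
     between outputs):
         stop now
         stop MaxFunEvals 1e6
         print fitness+sigma 10
         write gen+fitness+sigma+xmean outcmaes.dat 1
         check eigen 1
         maxTimeFractionForEigendecompostion 0.2
  */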
time(NULL)-t->firstwritetime : 0; + if (countiterlastwritten > t->gen) { /* probably restarted */ + maxdiffitertowrite = 0; + countiterlastwritten = 0; + } + + keys[0] = " stop%98s %98s"; /* s=="now" or eg "MaxIter+" %lg"-number */ + /* works with and without space */ + keys[1] = " print %98s %98s"; /* s==keyword for WriteFile */ + keys[2] = " write %98s %128s %98s"; /* s1==keyword, s2==filename */ + keys[3] = " check%98s %98s"; + keys[4] = " maxTimeFractionForEigendecompostion %98s"; + ckeys = 5; + strcpy(sin2, "tmpcmaes.dat"); + + if (cmaes_TestForTermination(t)) { + deltaprinttime = time(NULL); /* forces printing */ + deltawritetime = time(NULL); + } + while(fgets(s, sizeof(s), fp) != NULL) { + if (s[0] == '#' || s[0] == '%') /* skip comments */ + continue; + sin1[0] = sin2[0] = sin3[0] = sin4[0] = '\0'; + for (ikey=0; ikey < ckeys; ++ikey) { + if((nb=sscanf(s, keys[ikey], sin1, sin2, sin3, sin4)) >= 1) { + switch(ikey) { + case 0 : /* "stop", reads "stop now" or eg. stopMaxIter */ + if (strncmp(sin1, "now", 3) == 0) + t->flgStop = 1; + else if (strncmp(sin1, "MaxFunEvals", 11) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopMaxFunEvals = d; + } else if (strncmp(sin1, "MaxIter", 4) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopMaxIter = d; + } else if (strncmp(sin1, "Fitness", 7) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) { + t->sp.stStopFitness.flg = 1; + t->sp.stStopFitness.val = d; + } + } else if (strncmp(sin1, "TolFunHist", 10) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopTolFunHist = d; + } else if (strncmp(sin1, "TolFun", 6) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopTolFun = d; + } else if (strncmp(sin1, "TolX", 4) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopTolX = d; + } else if (strncmp(sin1, "TolUpXFactor", 4) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) + t->sp.stopTolUpXFactor = d; + } + break; + case 1 : /* "print" */ + d = 1; /* default */ + if (sscanf(sin2, "%lg", &d) < 1 && deltaprinttimefirst < 1) + d = 0; /* default at first time */ + if (deltaprinttime >= d && !flglockprint) { + cmaes_WriteToFilePtr(t, sin1, stdout); + flgprinted = 1; + } + if(d < 0) + flglockprint += 2; + break; + case 2 : /* "write" */ + /* write header, before first generation */ + if (t->countevals < t->sp.lambda && t->flgresumedone == 0) + cmaes_WriteToFileAW(t, sin1, sin2, "w"); /* overwrite */ + d = 0.9; /* default is one with smooth increment of gaps */ + if (sscanf(sin3, "%lg", &d) < 1 && deltawritetimefirst < 2) + d = 0; /* default is zero for the first second */ + if(d < 0) + flglockwrite += 2; + if (!flglockwrite) { + if (deltawritetime >= d) { + cmaes_WriteToFile(t, sin1, sin2); + flgwritten = 1; + } else if (d < 1 + && t->gen-countiterlastwritten > maxdiffitertowrite) { + cmaes_WriteToFile(t, sin1, sin2); + flgwritten = 1; + } + } + break; + case 3 : /* check, checkeigen 1 or check eigen 1 */ + if (strncmp(sin1, "eigen", 5) == 0) { + if (sscanf(sin2, " %lg", &d) == 1) { + if (d > 0) + t->flgCheckEigen = 1; + else + t->flgCheckEigen = 0; + } else + t->flgCheckEigen = 0; + } + break; + case 4 : /* maxTimeFractionForEigendecompostion */ + if (sscanf(sin1, " %lg", &d) == 1) + t->sp.updateCmode.maxtime = d; + break; + default : + break; + } + break; /* for ikey */ + } /* if line contains keyword */ + } /* for each keyword */ + } /* while not EOF of signals.par */ + if (t->writetime == 0) + t->firstwritetime = time(NULL); + if (t->printtime == 0) + t->firstprinttime = time(NULL); + + if (flgprinted) + t->printtime = time(NULL); + if 
(flgwritten) { + t->writetime = time(NULL); + if (t->gen-countiterlastwritten > maxdiffitertowrite) + ++maxdiffitertowrite; /* smooth prolongation of writing gaps/intervals */ + countiterlastwritten = (long int) t->gen; + } + --flglockprint; + --flglockwrite; + flglockprint = (flglockprint > 0) ? 1 : 0; + flglockwrite = (flglockwrite > 0) ? 1 : 0; +} /* cmaes_ReadFromFilePtr */ + +/* ========================================================= */ +static int +Check_Eigen( int N, double **C, double *diag, double **Q) +/* + exhaustive test of the output of the eigendecomposition + needs O(n^3) operations + + writes to error file + returns number of detected inaccuracies +*/ +{ + /* compute Q diag Q^T and Q Q^T to check */ + int i, j, k, res = 0; + double cc, dd; + static char s[324]; + + for (i=0; i < N; ++i) + for (j=0; j < N; ++j) { + for (cc=0.,dd=0., k=0; k < N; ++k) { + cc += diag[k] * Q[i][k] * Q[j][k]; + dd += Q[i][k] * Q[j][k]; + } + /* check here, is the normalization the right one? */ + if (fabs(cc - C[i>j?i:j][i>j?j:i])/sqrt(C[i][i]*C[j][j]) > 1e-10 + && fabs(cc - C[i>j?i:j][i>j?j:i]) > 3e-14) { + sprintf(s, "%d %d: %.17e %.17e, %e", + i, j, cc, C[i>j?i:j][i>j?j:i], cc-C[i>j?i:j][i>j?j:i]); + ERRORMESSAGE("cmaes_t:Eigen(): imprecise result detected ", + s, 0, 0); + ++res; + } + if (fabs(dd - (i==j)) > 1e-10) { + sprintf(s, "%d %d %.17e ", i, j, dd); + ERRORMESSAGE("cmaes_t:Eigen(): imprecise result detected (Q not orthog.)", + s, 0, 0); + ++res; + } + } + return res; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void +cmaes_UpdateEigensystem(cmaes_t *t, int flgforce) { + int i, N = t->sp.N; + + timings_update(&t->eigenTimings); + + if(flgforce == 0) { + if (t->flgEigensysIsUptodate == 1) + return; + + /* return on modulo generation number */ + if (t->sp.updateCmode.flgalways == 0 /* not implemented, always ==0 */ + && t->gen < t->genOfEigensysUpdate + t->sp.updateCmode.modulo + ) + return; + + /* return on time percentage */ + if (t->sp.updateCmode.maxtime < 1.00 + && t->eigenTimings.tictoctime > t->sp.updateCmode.maxtime * t->eigenTimings.totaltime + && t->eigenTimings.tictoctime > 0.0002) + return; + } + timings_tic(&t->eigenTimings); + + Eigen( N, t->C, t->rgD, t->B, t->rgdTmp); + + timings_toc(&t->eigenTimings); + + /* find largest and smallest eigenvalue, they are supposed to be sorted anyway */ + t->minEW = rgdouMin(t->rgD, N); + t->maxEW = rgdouMax(t->rgD, N); + + if (t->flgCheckEigen) + /* needs O(n^3)! writes, in case, error message in error file */ + i = Check_Eigen( N, t->C, t->rgD, t->B); + +#if 0 + /* Limit Condition of C to dMaxSignifKond+1 */ + if (t->maxEW > t->minEW * t->dMaxSignifKond) { + ERRORMESSAGE("Warning: Condition number of covariance matrix at upper limit.", + " Consider a rescaling or redesign of the objective function. 
" ,"",""); + printf("\nWarning: Condition number of covariance matrix at upper limit\n"); + tmp = t->maxEW/t->dMaxSignifKond - t->minEW; + tmp = t->maxEW/t->dMaxSignifKond; + t->minEW += tmp; + for (i=0; iC[i][i] += tmp; + t->rgD[i] += tmp; + } + } /* if */ + t->dLastMinEWgroesserNull = minEW; +#endif + + for (i = 0; i < N; ++i) + t->rgD[i] = sqrt(t->rgD[i]); + + t->flgEigensysIsUptodate = 1; + t->genOfEigensysUpdate = t->gen; + + return; + +} /* cmaes_UpdateEigensystem() */ + + +/* ========================================================= */ +static void +Eigen( int N, double **C, double *diag, double **Q, double *rgtmp) +/* + Calculating eigenvalues and vectors. + Input: + N: dimension. + C: symmetric (1:N)xN-matrix, solely used to copy data to Q + niter: number of maximal iterations for QL-Algorithm. + rgtmp: N+1-dimensional vector for temporal use. + Output: + diag: N eigenvalues. + Q: Columns are normalized eigenvectors. + */ +{ + int i, j; + + if (rgtmp == NULL) /* was OK in former versions */ + FATAL("cmaes_t:Eigen(): input parameter double *rgtmp must be non-NULL", 0,0,0); + + /* copy C to Q */ + if (C != Q) { + for (i=0; i < N; ++i) + for (j = 0; j <= i; ++j) + Q[i][j] = Q[j][i] = C[i][j]; + } + +#if 0 + Householder( N, Q, diag, rgtmp); + QLalgo( N, diag, Q, 30*N, rgtmp+1); +#else + Householder2( N, Q, diag, rgtmp); + QLalgo2( N, diag, rgtmp, Q); +#endif + +} + + +/* ========================================================= */ +static void +QLalgo2 (int n, double *d, double *e, double **V) { + /* + -> n : Dimension. + -> d : Diagonale of tridiagonal matrix. + -> e[1..n-1] : off-diagonal, output from Householder + -> V : matrix output von Householder + <- d : eigenvalues + <- e : garbage? + <- V : basis of eigenvectors, according to d + + Symmetric tridiagonal QL algorithm, iterative + Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations + + code adapted from Java JAMA package, function tql2. + */ + + int i, k, l, m; + double f = 0.0; + double tst1 = 0.0; + double eps = 2.22e-16; /* Math.pow(2.0,-52.0); == 2.22e-16 */ + + /* shift input e */ + for (i = 1; i < n; i++) { + e[i-1] = e[i]; + } + e[n-1] = 0.0; /* never changed again */ + + for (l = 0; l < n; l++) { + + /* Find small subdiagonal element */ + + if (tst1 < fabs(d[l]) + fabs(e[l])) + tst1 = fabs(d[l]) + fabs(e[l]); + m = l; + while (m < n) { + if (fabs(e[m]) <= eps*tst1) { + /* if (fabs(e[m]) + fabs(d[m]+d[m+1]) == fabs(d[m]+d[m+1])) { */ + break; + } + m++; + } + + /* If m == l, d[l] is an eigenvalue, */ + /* otherwise, iterate. */ + + if (m > l) { /* TODO: check the case m == n, should be rejected here!? */ + int iter = 0; + do { /* while (fabs(e[l]) > eps*tst1); */ + double dl1, h; + double g = d[l]; + double p = (d[l+1] - g) / (2.0 * e[l]); + double r = myhypot(p, 1.); + + iter = iter + 1; /* Could check iteration count here */ + + /* Compute implicit shift */ + + if (p < 0) { + r = -r; + } + d[l] = e[l] / (p + r); + d[l+1] = e[l] * (p + r); + dl1 = d[l+1]; + h = g - d[l]; + for (i = l+2; i < n; i++) { + d[i] -= h; + } + f = f + h; + + /* Implicit QL transformation. */ + + p = d[m]; + { + double c = 1.0; + double c2 = c; + double c3 = c; + double el1 = e[l+1]; + double s = 0.0; + double s2 = 0.0; + for (i = m-1; i >= l; i--) { + c3 = c2; + c2 = c; + s2 = s; + g = c * e[i]; + h = c * p; + r = myhypot(p, e[i]); + e[i+1] = s * r; + s = e[i] / r; + c = p / r; + p = c * d[i] - s * g; + d[i+1] = h + s * (c * g + s * d[i]); + + /* Accumulate transformation. 
*/ + + for (k = 0; k < n; k++) { + h = V[k][i+1]; + V[k][i+1] = s * V[k][i] + c * h; + V[k][i] = c * V[k][i] - s * h; + } + } + p = -s * s2 * c3 * el1 * e[l] / dl1; + e[l] = s * p; + d[l] = c * p; + } + + /* Check for convergence. */ + + } while (fabs(e[l]) > eps*tst1); + } + d[l] = d[l] + f; + e[l] = 0.0; + } + + /* Sort eigenvalues and corresponding vectors. */ +#if 1 + /* TODO: really needed here? So far not, but practical and only O(n^2) */ + { + int j; + double p; + for (i = 0; i < n-1; i++) { + k = i; + p = d[i]; + for (j = i+1; j < n; j++) { + if (d[j] < p) { + k = j; + p = d[j]; + } + } + if (k != i) { + d[k] = d[i]; + d[i] = p; + for (j = 0; j < n; j++) { + p = V[j][i]; + V[j][i] = V[j][k]; + V[j][k] = p; + } + } + } + } +#endif +} /* QLalgo2 */ + + +/* ========================================================= */ +static void +Householder2(int n, double **V, double *d, double *e) { + /* + Householder transformation of a symmetric matrix V into tridiagonal form. + -> n : dimension + -> V : symmetric nxn-matrix + <- V : orthogonal transformation matrix: + tridiag matrix == V * V_in * V^t + <- d : diagonal + <- e[0..n-1] : off diagonal (elements 1..n-1) + + code slightly adapted from the Java JAMA package, function private tred2() + + */ + + int i,j,k; + + for (j = 0; j < n; j++) { + d[j] = V[n-1][j]; + } + + /* Householder reduction to tridiagonal form */ + + for (i = n-1; i > 0; i--) { + + /* Scale to avoid under/overflow */ + + double scale = 0.0; + double h = 0.0; + for (k = 0; k < i; k++) { + scale = scale + fabs(d[k]); + } + if (scale == 0.0) { + e[i] = d[i-1]; + for (j = 0; j < i; j++) { + d[j] = V[i-1][j]; + V[i][j] = 0.0; + V[j][i] = 0.0; + } + } else { + + /* Generate Householder vector */ + + double f, g, hh; + + for (k = 0; k < i; k++) { + d[k] /= scale; + h += d[k] * d[k]; + } + f = d[i-1]; + g = sqrt(h); + if (f > 0) { + g = -g; + } + e[i] = scale * g; + h = h - f * g; + d[i-1] = f - g; + for (j = 0; j < i; j++) { + e[j] = 0.0; + } + + /* Apply similarity transformation to remaining columns */ + + for (j = 0; j < i; j++) { + f = d[j]; + V[j][i] = f; + g = e[j] + V[j][j] * f; + for (k = j+1; k <= i-1; k++) { + g += V[k][j] * d[k]; + e[k] += V[k][j] * f; + } + e[j] = g; + } + f = 0.0; + for (j = 0; j < i; j++) { + e[j] /= h; + f += e[j] * d[j]; + } + hh = f / (h + h); + for (j = 0; j < i; j++) { + e[j] -= hh * d[j]; + } + for (j = 0; j < i; j++) { + f = d[j]; + g = e[j]; + for (k = j; k <= i-1; k++) { + V[k][j] -= (f * e[k] + g * d[k]); + } + d[j] = V[i-1][j]; + V[i][j] = 0.0; + } + } + d[i] = h; + } + + /* Accumulate transformations */ + + for (i = 0; i < n-1; i++) { + double h; + V[n-1][i] = V[i][i]; + V[i][i] = 1.0; + h = d[i+1]; + if (h != 0.0) { + for (k = 0; k <= i; k++) { + d[k] = V[k][i+1] / h; + } + for (j = 0; j <= i; j++) { + double g = 0.0; + for (k = 0; k <= i; k++) { + g += V[k][i+1] * V[k][j]; + } + for (k = 0; k <= i; k++) { + V[k][j] -= g * d[k]; + } + } + } + for (k = 0; k <= i; k++) { + V[k][i+1] = 0.0; + } + } + for (j = 0; j < n; j++) { + d[j] = V[n-1][j]; + V[n-1][j] = 0.0; + } + V[n-1][n-1] = 1.0; + e[0] = 0.0; + +} /* Housholder() */ + + +#if 0 +/* ========================================================= */ +static void +WriteMaxErrorInfo(cmaes_t *t) { + int i,j, N=t->sp.N; + char *s = (char *)new_void(200+30*(N+2), sizeof(char)); + s[0] = '\0'; + + sprintf( s+strlen(s),"\nComplete Info\n"); + sprintf( s+strlen(s)," Gen %20.12g\n", t->gen); + sprintf( s+strlen(s)," Dimension %d\n", N); + sprintf( s+strlen(s)," sigma %e\n", t->sigma); + sprintf( 
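/* Overview of the eigendecomposition pipeline used by cmaes_UpdateEigensystem():
   Eigen() copies the symmetric covariance matrix C into B, Householder2()
   reduces it to tridiagonal form (diagonal in rgD, off-diagonal in rgdTmp),
   and QLalgo2() applies implicit QL iterations until the off-diagonal
   vanishes, leaving the eigenvalues in rgD and the eigenvectors in the
   columns of B.  cmaes_UpdateEigensystem() then replaces each eigenvalue by
   its square root, so rgD finally holds the axis lengths of the sampling
   ellipsoid. */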
s+strlen(s)," lastminEW %e\n", + t->dLastMinEWgroesserNull); + sprintf( s+strlen(s)," maxKond %e\n\n", t->dMaxSignifKond); + sprintf( s+strlen(s)," x-vector rgD Basis...\n"); + ERRORMESSAGE( s,0,0,0); + s[0] = '\0'; + for (i = 0; i < N; ++i) { + sprintf( s+strlen(s), " %20.12e", t->rgxmean[i]); + sprintf( s+strlen(s), " %10.4e", t->rgD[i]); + for (j = 0; j < N; ++j) + sprintf( s+strlen(s), " %10.2e", t->B[i][j]); + ERRORMESSAGE( s,0,0,0); + s[0] = '\0'; + } + ERRORMESSAGE( "\n",0,0,0); + free( s); +} /* WriteMaxErrorInfo() */ +#endif + +/* --------------------------------------------------------- */ +/* --------------- Functions: timings_t -------------------- */ +/* --------------------------------------------------------- */ +/* timings_t measures overall time and times between calls + * of tic and toc. For small time spans (up to 1000 seconds) + * CPU time via clock() is used. For large time spans the + * fall-back to elapsed time from time() is used. + * timings_update() must be called often enough to prevent + * the fallback. */ +/* --------------------------------------------------------- */ +void +timings_init(timings_t *t) { + t->totaltotaltime = 0; + timings_start(t); +} +void +timings_start(timings_t *t) { + t->totaltime = 0; + t->tictoctime = 0; + t->lasttictoctime = 0; + t->istic = 0; + t->lastclock = clock(); + t->lasttime = time(NULL); + t->lastdiff = 0; + t->tictoczwischensumme = 0; + t->isstarted = 1; +} + +double +timings_update(timings_t *t) { + /* returns time between last call of timings_*() and now, + * should better return totaltime or tictoctime? + */ + double diffc, difft; + clock_t lc = t->lastclock; /* measure CPU in 1e-6s */ + time_t lt = t->lasttime; /* measure time in s */ + + if (t->isstarted != 1) + FATAL("timings_started() must be called before using timings... functions",0,0,0); + + t->lastclock = clock(); /* measures at most 2147 seconds, where 1s = 1e6 CLOCKS_PER_SEC */ + t->lasttime = time(NULL); + + diffc = (double)(t->lastclock - lc) / CLOCKS_PER_SEC; /* is presumably in [-21??, 21??] */ + difft = difftime(t->lasttime, lt); /* is presumably an integer */ + + t->lastdiff = difft; /* on the "save" side */ + + /* use diffc clock measurement if appropriate */ + if (diffc > 0 && difft < 1000) + t->lastdiff = diffc; + + if (t->lastdiff < 0) + FATAL("BUG in time measurement", 0, 0, 0); + + t->totaltime += t->lastdiff; + t->totaltotaltime += t->lastdiff; + if (t->istic) { + t->tictoczwischensumme += t->lastdiff; + t->tictoctime += t->lastdiff; + } + + return t->lastdiff; +} + +void +timings_tic(timings_t *t) { + if (t->istic) { /* message not necessary ? */ + ERRORMESSAGE("Warning: timings_tic called twice without toc",0,0,0); + return; + } + timings_update(t); + t->istic = 1; +} + +double +timings_toc(timings_t *t) { + if (!t->istic) { + ERRORMESSAGE("Warning: timings_toc called without tic",0,0,0); + return -1; + } + timings_update(t); + t->lasttictoctime = t->tictoczwischensumme; + t->tictoczwischensumme = 0; + t->istic = 0; + return t->lasttictoctime; +} + +/* --------------------------------------------------------- */ +/* ---------------- Functions: random_t -------------------- */ +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +/* X_1 exakt : 0.79788456) */ +/* chi_eins simuliert : 0.798xx (seed -3) */ +/* +-0.001 */ +/* --------------------------------------------------------- */ +/* + Gauss() liefert normalverteilte Zufallszahlen + bei vorgegebenem seed. 
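   (English: Gauss() returns normally distributed random numbers for a given
   seed; the exact value of X_1 above is 0.79788456, the simulated chi_1 is
   0.798xx for seed -3, i.e. accurate to about +-0.001.)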
+*/ +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ + +long +random_init( random_t *t, long unsigned inseed) { + clock_t cloc = clock(); + + t->flgstored = 0; + t->rgrand = (long *) new_void(32, sizeof(long)); + if (inseed < 1) { + while ((long) (cloc - clock()) == 0) + ; /* TODO: remove this for time critical applications? */ + inseed = (long unsigned)abs((long)(100*time(NULL)+clock())); + } + return random_Start(t, inseed); +} + +void +random_exit(random_t *t) { + free( t->rgrand); +} + +/* --------------------------------------------------------- */ +long random_Start( random_t *t, long unsigned inseed) { + long tmp; + int i; + + t->flgstored = 0; + t->startseed = inseed; /* purely for bookkeeping */ + while (inseed > 2e9) + inseed /= 2; /* prevent infinite loop on 32 bit system */ + if (inseed < 1) + inseed = 1; + t->aktseed = inseed; + for (i = 39; i >= 0; --i) { + tmp = t->aktseed/127773; + t->aktseed = 16807 * (t->aktseed - tmp * 127773) + - 2836 * tmp; + if (t->aktseed < 0) t->aktseed += 2147483647; + if (i < 32) + t->rgrand[i] = t->aktseed; + } + t->aktrand = t->rgrand[0]; + return inseed; +} + +/* --------------------------------------------------------- */ +double random_Gauss(random_t *t) { + double x1, x2, rquad, fac; + + if (t->flgstored) { + t->flgstored = 0; + return t->hold; + } + do { + x1 = 2.0 * random_Uniform(t) - 1.0; + x2 = 2.0 * random_Uniform(t) - 1.0; + rquad = x1*x1 + x2*x2; + } while(rquad >= 1 || rquad <= 0); + fac = sqrt(-2.0*log(rquad)/rquad); + t->flgstored = 1; + t->hold = fac * x1; + return fac * x2; +} + +/* --------------------------------------------------------- */ +double random_Uniform( random_t *t) { + long tmp; + + tmp = t->aktseed/127773; + t->aktseed = 16807 * (t->aktseed - tmp * 127773) + - 2836 * tmp; + if (t->aktseed < 0) + t->aktseed += 2147483647; + tmp = t->aktrand / 67108865; + t->aktrand = t->rgrand[tmp]; + t->rgrand[tmp] = t->aktseed; + return (double)(t->aktrand)/(2.147483647e9); +} + +static char * +szCat(const char *sz1, const char*sz2, + const char *sz3, const char *sz4); + +/* --------------------------------------------------------- */ +/* -------------- Functions: readpara_t -------------------- */ +/* --------------------------------------------------------- */ +void +readpara_init (readpara_t *t, + int dim, + int inseed, + const double * inxstart, + const double * inrgsigma, + int lambda, + const char * filename) { + int i, N; + /* TODO: make sure readpara_init has not been called already */ + t->filename = NULL; /* set after successful Read */ + t->rgsformat = (const char **) new_void(55, sizeof(char *)); + t->rgpadr = (void **) new_void(55, sizeof(void *)); + t->rgskeyar = (const char **) new_void(11, sizeof(char *)); + t->rgp2adr = (double ***) new_void(11, sizeof(double **)); + t->weigkey = (char *)new_void(7, sizeof(char)); + + /* All scalars: */ + i = 0; + t->rgsformat[i] = " N %d"; + t->rgpadr[i++] = (void *) &t->N; + t->rgsformat[i] = " seed %d"; + t->rgpadr[i++] = (void *) &t->seed; + t->rgsformat[i] = " stopMaxFunEvals %lg"; + t->rgpadr[i++] = (void *) &t->stopMaxFunEvals; + t->rgsformat[i] = " stopMaxIter %lg"; + t->rgpadr[i++] = (void *) &t->stopMaxIter; + t->rgsformat[i] = " stopFitness %lg"; + t->rgpadr[i++]=(void *) &t->stStopFitness.val; + t->rgsformat[i] = " stopTolFun %lg"; + t->rgpadr[i++]=(void *) &t->stopTolFun; + t->rgsformat[i] = " stopTolFunHist %lg"; + t->rgpadr[i++]=(void *) &t->stopTolFunHist; + t->rgsformat[i] = " 
stopTolX %lg"; + t->rgpadr[i++]=(void *) &t->stopTolX; + t->rgsformat[i] = " stopTolUpXFactor %lg"; + t->rgpadr[i++]=(void *) &t->stopTolUpXFactor; + t->rgsformat[i] = " lambda %d"; + t->rgpadr[i++] = (void *) &t->lambda; + t->rgsformat[i] = " mu %d"; + t->rgpadr[i++] = (void *) &t->mu; + t->rgsformat[i] = " weights %5s"; + t->rgpadr[i++] = (void *) t->weigkey; + t->rgsformat[i] = " fac*cs %lg"; + t->rgpadr[i++] = (void *) &t->cs; + t->rgsformat[i] = " fac*damps %lg"; + t->rgpadr[i++] = (void *) &t->damps; + t->rgsformat[i] = " ccumcov %lg"; + t->rgpadr[i++] = (void *) &t->ccumcov; + t->rgsformat[i] = " mucov %lg"; + t->rgpadr[i++] = (void *) &t->mucov; + t->rgsformat[i] = " fac*ccov %lg"; + t->rgpadr[i++]=(void *) &t->ccov; + t->rgsformat[i] = " diagonalCovarianceMatrix %lg"; + t->rgpadr[i++]=(void *) &t->diagonalCov; + t->rgsformat[i] = " updatecov %lg"; + t->rgpadr[i++]=(void *) &t->updateCmode.modulo; + t->rgsformat[i] = " maxTimeFractionForEigendecompostion %lg"; + t->rgpadr[i++]=(void *) &t->updateCmode.maxtime; + t->rgsformat[i] = " resume %59s"; + t->rgpadr[i++] = (void *) t->resumefile; + t->rgsformat[i] = " fac*maxFunEvals %lg"; + t->rgpadr[i++] = (void *) &t->facmaxeval; + t->rgsformat[i] = " fac*updatecov %lg"; + t->rgpadr[i++]=(void *) &t->facupdateCmode; + t->n1para = i; + t->n1outpara = i-2; /* disregard last parameters in WriteToFile() */ + + /* arrays */ + i = 0; + t->rgskeyar[i] = " typicalX %d"; + t->rgp2adr[i++] = &t->typicalX; + t->rgskeyar[i] = " initialX %d"; + t->rgp2adr[i++] = &t->xstart; + t->rgskeyar[i] = " initialStandardDeviations %d"; + t->rgp2adr[i++] = &t->rgInitialStds; + t->rgskeyar[i] = " diffMinChange %d"; + t->rgp2adr[i++] = &t->rgDiffMinChange; + t->n2para = i; + + t->N = dim; + t->seed = (unsigned) inseed; + t->xstart = NULL; + t->typicalX = NULL; + t->typicalXcase = 0; + t->rgInitialStds = NULL; + t->rgDiffMinChange = NULL; + t->stopMaxFunEvals = -1; + t->stopMaxIter = -1; + t->facmaxeval = 1; + t->stStopFitness.flg = -1; + t->stopTolFun = 1e-12; + t->stopTolFunHist = 1e-13; + t->stopTolX = 0; /* 1e-11*insigma would also be reasonable */ + t->stopTolUpXFactor = 1e3; + + t->lambda = lambda; + t->mu = -1; + t->mucov = -1; + t->weights = NULL; + strcpy(t->weigkey, "log"); + + t->cs = -1; + t->ccumcov = -1; + t->damps = -1; + t->ccov = -1; + + t->diagonalCov = 0; /* default is 0, but this might change in future, see below */ + + t->updateCmode.modulo = -1; + t->updateCmode.maxtime = -1; + t->updateCmode.flgalways = 0; + t->facupdateCmode = 1; + strcpy(t->resumefile, "_no_"); + + /* filename == NULL invokes default in readpara_Read... */ + if (!isNoneStr(filename) && (!filename || strcmp(filename, "writeonly") != 0)) + readpara_ReadFromFile(t, filename); + + if (t->N <= 0) + t->N = dim; + + N = t->N; + if (N == 0) + FATAL("readpara_readpara_t(): problem dimension N undefined.\n", + " (no default value available).",0,0); + if (t->xstart == NULL && inxstart == NULL && t->typicalX == NULL) { + ERRORMESSAGE("Warning: initialX undefined. typicalX = 0.5...0.5 used.","","",""); + printf("\nWarning: initialX undefined. typicalX = 0.5...0.5 used.\n"); + } + if (t->rgInitialStds == NULL && inrgsigma == NULL) { + /* FATAL("initialStandardDeviations undefined","","",""); */ + ERRORMESSAGE("Warning: initialStandardDeviations undefined. 0.3...0.3 used.","","",""); + printf("\nWarning: initialStandardDeviations. 
0.3...0.3 used.\n"); + } + + if (t->xstart == NULL) { + t->xstart = new_double(N); + + /* put inxstart into xstart */ + if (inxstart != NULL) { + for (i=0; ixstart[i] = inxstart[i]; + } + /* otherwise use typicalX or default */ + else { + t->typicalXcase = 1; + for (i=0; ixstart[i] = (t->typicalX == NULL) ? 0.5 : t->typicalX[i]; + } + } /* xstart == NULL */ + + if (t->rgInitialStds == NULL) { + t->rgInitialStds = new_double(N); + for (i=0; irgInitialStds[i] = (inrgsigma == NULL) ? 0.3 : inrgsigma[i]; + } + + readpara_SupplementDefaults(t); + if (!isNoneStr(filename)) + readpara_WriteToFile(t, "actparcmaes.par"); +} /* readpara_init */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void readpara_exit(readpara_t *t) { + if (t->filename != NULL) + free( t->filename); + if (t->xstart != NULL) /* not really necessary */ + free( t->xstart); + if (t->typicalX != NULL) + free( t->typicalX); + if (t->rgInitialStds != NULL) + free( t->rgInitialStds); + if (t->rgDiffMinChange != NULL) + free( t->rgDiffMinChange); + if (t->weights != NULL) + free( t->weights); + + free(t->rgsformat); + free(t->rgpadr); + free(t->rgskeyar); + free(t->rgp2adr); + free(t->weigkey); +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void +readpara_ReadFromFile(readpara_t *t, const char * filename) { + char s[1000]; + const char *ss = "cmaes_initials.par"; + int ipara, i; + int size; + FILE *fp; + if (filename == NULL) { + filename = ss; + } + t->filename = NULL; /* nothing read so far */ + fp = fopen( filename, "r"); + if(fp == NULL) { + ERRORMESSAGE("cmaes_ReadFromFile(): could not open '", filename, "'",0); + return; + } + for (ipara=0; ipara < t->n1para; ++ipara) { + rewind(fp); + while(fgets(s, sizeof(s), fp) != NULL) { + /* skip comments */ + if (s[0] == '#' || s[0] == '%') + continue; + if(sscanf(s, t->rgsformat[ipara], t->rgpadr[ipara]) == 1) { + if (strncmp(t->rgsformat[ipara], " stopFitness ", 13) == 0) + t->stStopFitness.flg = 1; + break; + } + } + } /* for */ + if (t->N <= 0) + FATAL("readpara_ReadFromFile(): No valid dimension N",0,0,0); + for (ipara=0; ipara < t->n2para; ++ipara) { + rewind(fp); + while(fgets(s, sizeof(s), fp) != NULL) { /* read one line */ + /* skip comments */ + if (s[0] == '#' || s[0] == '%') + continue; + if(sscanf(s, t->rgskeyar[ipara], &size) == 1) { /* size==number of values to be read */ + if (size > 0) { + *t->rgp2adr[ipara] = new_double(t->N); + for (i=0; iN; ++i) /* start reading next line */ + if (fscanf(fp, " %lf", &(*t->rgp2adr[ipara])[i]) != 1) + break; + if (iN) { + ERRORMESSAGE("readpara_ReadFromFile ", filename, ": ",0); + FATAL( "'", t->rgskeyar[ipara], + "' not enough values found.\n", + " Remove all comments between numbers."); + } + for (; i < t->N; ++i) /* recycle */ + (*t->rgp2adr[ipara])[i] = (*t->rgp2adr[ipara])[i%size]; + } + } + } + } /* for */ + fclose(fp); + assign_string(&(t->filename), filename); /* t->filename must be freed */ + return; +} /* readpara_ReadFromFile() */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void +readpara_WriteToFile(readpara_t *t, const char *filenamedest) { + int ipara, i; + size_t len; + time_t ti = time(NULL); + FILE *fp = fopen( filenamedest, "a"); + if(fp == NULL) { + ERRORMESSAGE("cmaes_WriteToFile(): could not open '", + filenamedest, "'",0); + return; + } + fprintf(fp, "\n# 
Read from %s at %s\n", t->filename ? t->filename : "", + asctime(localtime(&ti))); /* == ctime() */ + for (ipara=0; ipara < 1; ++ipara) { + fprintf(fp, t->rgsformat[ipara], *(int *)t->rgpadr[ipara]); + fprintf(fp, "\n"); + } + for (ipara=0; ipara < t->n2para; ++ipara) { + if(*t->rgp2adr[ipara] == NULL) + continue; + fprintf(fp, t->rgskeyar[ipara], t->N); + fprintf(fp, "\n"); + for (i=0; iN; ++i) + fprintf(fp, "%7.3g%c", (*t->rgp2adr[ipara])[i], (i%5==4)?'\n':' '); + fprintf(fp, "\n"); + } + for (ipara=1; ipara < t->n1outpara; ++ipara) { + if (strncmp(t->rgsformat[ipara], " stopFitness ", 13) == 0) + if(t->stStopFitness.flg == 0) { + fprintf(fp, " stopFitness\n"); + continue; + } + len = strlen(t->rgsformat[ipara]); + if (t->rgsformat[ipara][len-1] == 'd') /* read integer */ + fprintf(fp, t->rgsformat[ipara], *(int *)t->rgpadr[ipara]); + else if (t->rgsformat[ipara][len-1] == 's') /* read string */ + fprintf(fp, t->rgsformat[ipara], (char *)t->rgpadr[ipara]); + else { + if (strncmp(" fac*", t->rgsformat[ipara], 5) == 0) { + fprintf(fp, " "); + fprintf(fp, t->rgsformat[ipara]+5, *(double *)t->rgpadr[ipara]); + } else + fprintf(fp, t->rgsformat[ipara], *(double *)t->rgpadr[ipara]); + } + fprintf(fp, "\n"); + } /* for */ + fprintf(fp, "\n"); + fclose(fp); +} /* readpara_WriteToFile() */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void +readpara_SupplementDefaults(readpara_t *t) { + double t1, t2; + int N = t->N; + clock_t cloc = clock(); + + if (t->seed < 1) { + while ((int) (cloc - clock()) == 0) + ; /* TODO: remove this for time critical applications!? */ + t->seed = (unsigned int)abs((long)(100*time(NULL)+clock())); + } + + if (t->stStopFitness.flg == -1) + t->stStopFitness.flg = 0; + + if (t->lambda < 2) + t->lambda = 4+(int)(3*log((double)N)); + if (t->mu == -1) { + t->mu = t->lambda/2; + readpara_SetWeights(t, t->weigkey); + } + if (t->weights == NULL) + readpara_SetWeights(t, t->weigkey); + + if (t->cs > 0) /* factor was read */ + t->cs *= (t->mueff + 2.) / (N + t->mueff + 3.); + if (t->cs <= 0 || t->cs >= 1) + t->cs = (t->mueff + 2.) / (N + t->mueff + 3.); + + if (t->ccumcov <= 0 || t->ccumcov > 1) + t->ccumcov = 4. / (N + 4); + + if (t->mucov < 1) { + t->mucov = t->mueff; + } + t1 = 2. / ((N+1.4142)*(N+1.4142)); + t2 = (2.*t->mueff-1.) / ((N+2.)*(N+2.)+t->mueff); + t2 = (t2 > 1) ? 1 : t2; + t2 = (1./t->mucov) * t1 + (1.-1./t->mucov) * t2; + if (t->ccov >= 0) /* ccov holds the read factor */ + t->ccov *= t2; + if (t->ccov < 0 || t->ccov > 1) /* set default in case */ + t->ccov = t2; + + if (t->diagonalCov == -1) + t->diagonalCov = 2 + 100. * N / sqrt((double)t->lambda); + + if (t->stopMaxFunEvals == -1) /* may depend on ccov in near future */ + t->stopMaxFunEvals = t->facmaxeval*900*(N+3)*(N+3); + else + t->stopMaxFunEvals *= t->facmaxeval; + + if (t->stopMaxIter == -1) + t->stopMaxIter = ceil((double)(t->stopMaxFunEvals / t->lambda)); + + if (t->damps < 0) + t->damps = 1; /* otherwise a factor was read */ + t->damps = t->damps + * (1 + 2*douMax(0., sqrt((t->mueff-1.)/(N+1.)) - 1)) /* basic factor */ + * douMax(0.3, 1. 
- /* modify for short runs */ + (double)N / (1e-6+douMin(t->stopMaxIter, t->stopMaxFunEvals/t->lambda))) + + t->cs; /* minor increment */ + + if (t->updateCmode.modulo < 0) + t->updateCmode.modulo = 1./t->ccov/(double)(N)/10.; + t->updateCmode.modulo *= t->facupdateCmode; + if (t->updateCmode.maxtime < 0) + t->updateCmode.maxtime = 0.20; /* maximal 20% of CPU-time */ + +} /* readpara_SupplementDefaults() */ + + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +void +readpara_SetWeights(readpara_t *t, const char * mode) { + double s1, s2; + int i; + + if(t->weights != NULL) + free( t->weights); + t->weights = new_double(t->mu); + if (strcmp(mode, "lin") == 0) + for (i=0; imu; ++i) + t->weights[i] = t->mu - i; + else if (strncmp(mode, "equal", 3) == 0) + for (i=0; imu; ++i) + t->weights[i] = 1; + else if (strcmp(mode, "log") == 0) + for (i=0; imu; ++i) + t->weights[i] = log(t->mu+1.)-log(i+1.); + else + for (i=0; imu; ++i) + t->weights[i] = log(t->mu+1.)-log(i+1.); + + /* normalize weights vector and set mueff */ + for (i=0, s1=0, s2=0; imu; ++i) { + s1 += t->weights[i]; + s2 += t->weights[i]*t->weights[i]; + } + t->mueff = s1*s1/s2; + for (i=0; imu; ++i) + t->weights[i] /= s1; + + if(t->mu < 1 || t->mu > t->lambda || + (t->mu==t->lambda && t->weights[0]==t->weights[t->mu-1])) + FATAL("readpara_SetWeights(): invalid setting of mu or lambda",0,0,0); + +} /* readpara_SetWeights() */ + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +static int +isNoneStr(const char * filename) { + if (filename && (strcmp(filename, "no") == 0 + || strcmp(filename, "non") == 0 + || strcmp(filename, "none") == 0)) + return 1; + + return 0; +} + +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ +static double +douSquare(double d) { + return d*d; +} +static int +intMin( int i, int j) { + return i < j ? i : j; +} +static double +douMax( double i, double j) { + return i > j ? i : j; +} +static double +douMin( double i, double j) { + return i < j ? i : j; +} +static double +rgdouMax( const double *rgd, int len) { + int i; + double max = rgd[0]; + for (i = 1; i < len; ++i) + max = (max < rgd[i]) ? rgd[i] : max; + return max; +} + +static double +rgdouMin( const double *rgd, int len) { + int i; + double min = rgd[0]; + for (i = 1; i < len; ++i) + min = (min > rgd[i]) ? rgd[i] : min; + return min; +} + +static int +MaxIdx( const double *rgd, int len) { + int i, res; + for(i=1, res=0; i rgd[res]) + res = i; + return res; +} +static int +MinIdx( const double *rgd, int len) { + int i, res; + for(i=1, res=0; i fabs(b)) { + r = b/a; + r = fabs(a)*sqrt(1+r*r); + } else if (b != 0) { + r = a/b; + r = fabs(b)*sqrt(1+r*r); + } + return r; +} + +static int SignOfDiff(const void *d1, const void * d2) { + return *((double *) d1) > *((double *) d2) ? 
1 : -1; +} + +#if 1 +/* dirty index sort */ +static void Sorted_index(const double *rgFunVal, int *iindex, int n) { + int i, j; + for (i=1, iindex[0]=0; i0; --j) { + if (rgFunVal[iindex[j-1]] < rgFunVal[i]) + break; + iindex[j] = iindex[j-1]; /* shift up */ + } + iindex[j] = i; /* insert i */ + } +} +#endif + +static void * new_void(int n, size_t size) { + static char s[70]; + void *p = calloc((unsigned) n, size); + if (p == NULL) { + sprintf(s, "new_void(): calloc(%ld,%ld) failed",(long)n,(long)size); + FATAL(s,0,0,0); + } + return p; +} + +double * +cmaes_NewDouble(int n) { + return new_double(n); +} + +static double * new_double(int n) { + static char s[170]; + double *p = (double *) calloc((unsigned) n, sizeof(double)); + if (p == NULL) { + sprintf(s, "new_double(): calloc(%ld,%ld) failed", + (long)n,(long)sizeof(double)); + FATAL(s,0,0,0); + } + return p; +} + +static char * new_string(const char *ins) { + static char s[170]; + unsigned i; + char *p; + unsigned len = (unsigned) strlen(ins); + if (len > 1000) { + FATAL("new_string(): input string length was larger then 1000 ", + "(possibly due to uninitialized char *filename)",0,0); + } + + p = (char *) calloc( len + 1, sizeof(char)); + if (p == NULL) { + sprintf(s, "new_string(): calloc(%ld,%ld) failed", + (long)len,(long)sizeof(char)); + FATAL(s,0,0,0); + } + for (i = 0; i < len; ++i) + p[i] = ins[i]; + return p; +} +static void assign_string(char ** pdests, const char *ins) { + if (*pdests) + free(*pdests); + *pdests = new_string(ins); +} +/* --------------------------------------------------------- */ +/* --------------------------------------------------------- */ + +/* ========================================================= */ +void +cmaes_FATAL(char const *s1, char const *s2, char const *s3, + char const *s4) { + time_t t = time(NULL); + ERRORMESSAGE( s1, s2, s3, s4); + ERRORMESSAGE("*** Exiting cmaes_t ***",0,0,0); + printf("\n -- %s %s\n", asctime(localtime(&t)), + s2 ? szCat(s1, s2, s3, s4) : s1); + printf(" *** CMA-ES ABORTED, see errcmaes.err *** \n"); + fflush(stdout); + exit(1); +} + +/* ========================================================= */ +static void +FATAL(char const *s1, char const *s2, char const *s3, + char const *s4) { + cmaes_FATAL(s1, s2, s3, s4); +} + +/* ========================================================= */ +void ERRORMESSAGE( char const *s1, char const *s2, + char const *s3, char const *s4) { +#if 1 + /* static char szBuf[700]; desirable but needs additional input argument + sprintf(szBuf, "%f:%f", gen, gen*lambda); + */ + time_t t = time(NULL); + FILE *fp = fopen( "errcmaes.err", "a"); + if (!fp) { + printf("\nFATAL ERROR: %s\n", s2 ? szCat(s1, s2, s3, s4) : s1); + printf("cmaes_t could not open file 'errcmaes.err'."); + printf("\n *** CMA-ES ABORTED *** "); + fflush(stdout); + exit(1); + } + fprintf( fp, "\n -- %s %s\n", asctime(localtime(&t)), + s2 ? 
szCat(s1, s2, s3, s4) : s1); + fclose (fp); +#endif +} + +/* ========================================================= */ +char *szCat(const char *sz1, const char*sz2, + const char *sz3, const char *sz4) { + static char szBuf[700]; + + if (!sz1) + FATAL("szCat() : Invalid Arguments",0,0,0); + + strncpy ((char *)szBuf, sz1, (unsigned)intMin( (int)strlen(sz1), 698)); + szBuf[intMin( (int)strlen(sz1), 698)] = '\0'; + if (sz2) + strncat ((char *)szBuf, sz2, + (unsigned)intMin((int)strlen(sz2)+1, 698 - (int)strlen((char const *)szBuf))); + if (sz3) + strncat((char *)szBuf, sz3, + (unsigned)intMin((int)strlen(sz3)+1, 698 - (int)strlen((char const *)szBuf))); + if (sz4) + strncat((char *)szBuf, sz4, + (unsigned)intMin((int)strlen(sz4)+1, 698 - (int)strlen((char const *)szBuf))); + return (char *) szBuf; +} + + diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.h b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.h new file mode 100644 index 000000000..0992ade21 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.h @@ -0,0 +1,175 @@ +/* --------------------------------------------------------- */ +/* --- File: cmaes.h ----------- Author: Nikolaus Hansen --- */ +/* ---------------------- last modified: IX 2010 --- */ +/* --------------------------------- by: Nikolaus Hansen --- */ +/* --------------------------------------------------------- */ +/* + CMA-ES for non-linear function minimization. + + Copyright (C) 1996, 2003-2010 Nikolaus Hansen. + e-mail: nikolaus.hansen (you know what) inria.fr + + License: see file cmaes.c + +*/ +#ifndef NH_cmaes_h /* only include ones */ +#define NH_cmaes_h + +#include + +typedef struct +/* random_t + * sets up a pseudo random number generator instance + */ +{ + /* Variables for Uniform() */ + long int startseed; + long int aktseed; + long int aktrand; + long int *rgrand; + + /* Variables for Gauss() */ + short flgstored; + double hold; +} random_t; + +typedef struct +/* timings_t + * time measurement, used to time eigendecomposition + */ +{ + /* for outside use */ + double totaltime; /* zeroed by calling re-calling timings_start */ + double totaltotaltime; + double tictoctime; + double lasttictoctime; + + /* local fields */ + clock_t lastclock; + time_t lasttime; + clock_t ticclock; + time_t tictime; + short istic; + short isstarted; + + double lastdiff; + double tictoczwischensumme; +} timings_t; + +typedef struct +/* readpara_t + * collects all parameters, in particular those that are read from + * a file before to start. This should split in future? + */ +{ + char * filename; /* keep record of the file that was taken to read parameters */ + + /* input parameters */ + int N; /* problem dimension, must stay constant, should be unsigned or long? 
*/ + unsigned int seed; + double * xstart; + double * typicalX; + int typicalXcase; + double * rgInitialStds; + double * rgDiffMinChange; + + /* termination parameters */ + double stopMaxFunEvals; + double facmaxeval; + double stopMaxIter; + struct { int flg; double val; } stStopFitness; + double stopTolFun; + double stopTolFunHist; + double stopTolX; + double stopTolUpXFactor; + + /* internal evolution strategy parameters */ + int lambda; /* -> mu, <- N */ + int mu; /* -> weights, (lambda) */ + double mucov, mueff; /* <- weights */ + double *weights; /* <- mu, -> mueff, mucov, ccov */ + double damps; /* <- cs, maxeval, lambda */ + double cs; /* -> damps, <- N */ + double ccumcov; /* <- N */ + double ccov; /* <- mucov, <- N */ + double diagonalCov; /* number of initial iterations */ + struct { int flgalways; double modulo; double maxtime; } updateCmode; + double facupdateCmode; + + /* supplementary variables */ + + char *weigkey; + char resumefile[99]; + const char **rgsformat; + void **rgpadr; + const char **rgskeyar; + double ***rgp2adr; + int n1para, n1outpara; + int n2para; +} readpara_t; + +typedef struct +/* cmaes_t + * CMA-ES "object" + */ +{ + const char *version; + /* char *signalsFilename; */ + readpara_t sp; + random_t rand; /* random number generator */ + + double sigma; /* step size */ + + double *rgxmean; /* mean x vector, "parent" */ + double *rgxbestever; + double **rgrgx; /* range of x-vectors, lambda offspring */ + int *index; /* sorting index of sample pop. */ + double *arFuncValueHist; + + short flgIniphase; /* not really in use anymore */ + short flgStop; + + double chiN; + double **C; /* lower triangular matrix: i>=j for C[i][j] */ + double **B; /* matrix with normalize eigenvectors in columns */ + double *rgD; /* axis lengths */ + + double *rgpc; + double *rgps; + double *rgxold; + double *rgout; + double *rgBDz; /* for B*D*z */ + double *rgdTmp; /* temporary (random) vector used in different places */ + double *rgFuncValue; + double *publicFitness; /* returned by cmaes_init() */ + + double gen; /* Generation number */ + double countevals; + double state; /* 1 == sampled, 2 == not in use anymore, 3 == updated */ + + double maxdiagC; /* repeatedly used for output */ + double mindiagC; + double maxEW; + double minEW; + + char sOutString[330]; /* 4x80 */ + + short flgEigensysIsUptodate; + short flgCheckEigen; /* control via cmaes_signals.par */ + double genOfEigensysUpdate; + timings_t eigenTimings; + + double dMaxSignifKond; + double dLastMinEWgroesserNull; + + short flgresumedone; + + time_t printtime; + time_t writetime; /* ideally should keep track for each output file */ + time_t firstwritetime; + time_t firstprinttime; + +} cmaes_t; + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.hpp new file mode 100644 index 000000000..b6724111c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes.hpp @@ -0,0 +1,94 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. 
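// Illustrative sketch (helper and variable names are local to this example,
// not part of the library): how sigma, B and rgD from cmaes_t above combine
// when a candidate solution is sampled.  CMA-ES draws
//   x = xmean + sigma * B * diag(rgD) * z,   z ~ N(0, I),
// so C = B * diag(rgD)^2 * B^T is the covariance of the search distribution
// (the struct field rgBDz is used for the product B * diag(rgD) * z).
static void sample_candidate(int dim, double sigma, const double *xmean,
                             double *const *B, const double *rgD,
                             const double *z, double *x) {
  for (int i = 0; i < dim; ++i) {
    double bdz = 0.0;                  // i-th component of B * diag(rgD) * z
    for (int j = 0; j < dim; ++j)
      bdz += B[i][j] * rgD[j] * z[j];  // columns of B: eigenvectors; rgD: axis lengths
    x[i] = xmean[i] + sigma * bdz;     // scaled by the global step size sigma
  }
}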
You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef CMAES_HPP_ +#define CMAES_HPP_ + +#include +#include +#include +#include +#include +#include +#include "cmaes_interface.h" + +namespace sferes { + + namespace ea { + + SFERES_EA(Cmaes, Ea) { + public: + Cmaes() { + _ar_funvals = cmaes_init(&_evo, dim, NULL, NULL, 0, 0, NULL); + _lambda = cmaes_Get(&_evo, "lambda"); // default lambda (pop size) + } + ~Cmaes() { + cmaes_exit(&_evo); + } + void random_pop() { + // we don't really need the random here + this->_pop.resize(_lambda); + BOOST_FOREACH(boost::shared_ptr&indiv, this->_pop) { + indiv = boost::shared_ptr(new Phen()); + } + } + void epoch() { + // + _cmaes_pop = cmaes_SamplePopulation(&_evo); + // copy pop + for (size_t i = 0; i < this->_pop.size(); ++i) + for (size_t j = 0; j < this->_pop[i]->size(); ++j) { + this->_pop[i]->gen().data(j, _cmaes_pop[i][j]); + this->_pop[i]->develop(); + } + // eval + this->_eval.eval(this->_pop, 0, this->_pop.size()); + this->apply_modifier(); + for (size_t i = 0; i < this->_pop.size(); ++i) { + //warning: CMAES minimizes the fitness... + _ar_funvals[i] = - this->_pop[i]->fit().value(); + } + // + cmaes_UpdateDistribution(&_evo, _ar_funvals); + } + protected: + SFERES_CONST size_t dim = Phen::gen_t::gen_size; + cmaes_t _evo; + double *_ar_funvals; + double * const * _cmaes_pop; + int _lambda; + }; + } +} +#endif \ No newline at end of file diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes_interface.h b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes_interface.h new file mode 100644 index 000000000..4b60bb6dc --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/cmaes_interface.h @@ -0,0 +1,55 @@ +/* --------------------------------------------------------- */ +/* --- File: cmaes_interface.h - Author: Nikolaus Hansen --- */ +/* ---------------------- last modified: IV 2007 --- */ +/* --------------------------------- by: Nikolaus Hansen --- */ +/* --------------------------------------------------------- */ +/* + CMA-ES for non-linear function minimization. + + Copyright (C) 1996, 2003, 2007 Nikolaus Hansen. 
+ e-mail: hansen AT lri.fr + + License: see file cmaes.c +*/ +#include "cmaes.h" + +/* --------------------------------------------------------- */ +/* ------------------ Interface ---------------------------- */ +/* --------------------------------------------------------- */ + +/* --- initialization, constructors, destructors --- */ +double * cmaes_init(cmaes_t *, int dimension , double *xstart, + double *stddev, long seed, int lambda, + const char *input_parameter_filename); +void cmaes_resume_distribution(cmaes_t *evo_ptr, char *filename); +void cmaes_exit(cmaes_t *); + +/* --- core functions --- */ +double * const * cmaes_SamplePopulation(cmaes_t *); +double * cmaes_UpdateDistribution(cmaes_t *, + const double *rgFitnessValues); +const char * cmaes_TestForTermination(cmaes_t *); + +/* --- additional functions --- */ +double * const * cmaes_ReSampleSingle( cmaes_t *t, int index); +double const * cmaes_ReSampleSingle_old(cmaes_t *, double *rgx); +double * cmaes_SampleSingleInto( cmaes_t *t, double *rgx); +void cmaes_UpdateEigensystem(cmaes_t *, int flgforce); + +/* --- getter functions --- */ +double cmaes_Get(cmaes_t *, char const *keyword); +const double * cmaes_GetPtr(cmaes_t *, char const *keyword); /* e.g. "xbestever" */ +double * cmaes_GetNew( cmaes_t *t, char const *keyword); /* user is responsible to free */ +double * cmaes_GetInto( cmaes_t *t, char const *keyword, double *mem); /* allocs if mem==NULL, user is responsible to free */ + +/* --- online control and output --- */ +void cmaes_ReadSignals(cmaes_t *, char const *filename); +void cmaes_WriteToFile(cmaes_t *, const char *szKeyWord, + const char *output_filename); +char * cmaes_SayHello(cmaes_t *); +/* --- misc --- */ +double * cmaes_NewDouble(int n); /* user is responsible to free */ +void cmaes_FATAL(char const *s1, char const *s2, char const *s3, + char const *s4); + + diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/common.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/common.hpp new file mode 100644 index 000000000..59ad19086 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/common.hpp @@ -0,0 +1,72 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
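/* Minimal usage sketch of the interface declared above; the objective
   function `sphere` and all local names are illustrative only. */
#include <stdlib.h>

static double sphere(const double *x, int n) {
  double s = 0.0;
  for (int i = 0; i < n; ++i)
    s += x[i] * x[i];
  return s;                                    /* value to be minimized */
}

static void run_cmaes_sketch(void) {
  cmaes_t evo;
  int dim = 10;
  /* NULL start point / step sizes fall back to built-in defaults (a warning
     is printed); "non" means: do not read a cmaes_initials.par file. */
  double *arFunvals = cmaes_init(&evo, dim, NULL, NULL, 0, 0, "non");
  while (!cmaes_TestForTermination(&evo)) {
    double *const *pop = cmaes_SamplePopulation(&evo);  /* lambda candidates */
    int lambda = (int) cmaes_Get(&evo, "lambda");
    for (int i = 0; i < lambda; ++i)
      arFunvals[i] = sphere(pop[i], dim);               /* evaluate */
    cmaes_UpdateDistribution(&evo, arFunvals);          /* adapt mean, sigma, C */
  }
  double *xbest = cmaes_GetNew(&evo, "xbestever");      /* caller must free */
  free(xbest);
  cmaes_exit(&evo);
}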
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef COMMON_HPP_ +#define COMMON_HPP_ +namespace sferes { + namespace ea { + + template + struct random { + std::vector >& _pop; + ~random() { } + random(std::vector >& pop) : _pop(pop) {} + random(const random& ev) : _pop(ev._pop) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + _pop[i] = boost::shared_ptr(new Phen()); + _pop[i]->random(); + } + } + }; + + template + struct mutate { + std::vector >& _pop; + + ~mutate() { } + mutate(std::vector >& pop) : _pop(pop) {} + mutate(const mutate& ev) : _pop(ev._pop) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) + _pop[i]->mutate(); + } + }; + + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/crowd.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/crowd.hpp new file mode 100644 index 000000000..8fd7f1ee9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/crowd.hpp @@ -0,0 +1,221 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
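The random and mutate helpers in common.hpp above follow an idiom reused throughout this module: a copyable functor that keeps a reference to the population and processes one index range, so parallel::p_for can hand it arbitrary chunks. Here is a standalone sketch of the pattern; Range and init_range are illustrative stand-ins, not the sferes parallel API.

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Range {                       // plays the role of parallel::range_t
      std::size_t b, e;
      std::size_t begin() const { return b; }
      std::size_t end() const { return e; }
    };

    struct Indiv { void random() { /* randomize the genotype */ } };

    struct init_range {                  // same shape as common.hpp's `random` functor
      std::vector<std::shared_ptr<Indiv> >& pop;
      explicit init_range(std::vector<std::shared_ptr<Indiv> >& p) : pop(p) {}
      void operator()(const Range& r) const {
        for (std::size_t i = r.begin(); i != r.end(); ++i) {
          pop[i] = std::make_shared<Indiv>();
          pop[i]->random();
        }
      }
    };

    int main() {
      std::vector<std::shared_ptr<Indiv> > pop(16);
      init_range f(pop);
      f(Range{0, pop.size()});           // p_for would split this range across threads
      return 0;
    }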
+ + +#ifndef CROWD_H_ +# define CROWD_H_ + +#include + +namespace sferes { + namespace ea { + namespace crowd { + SFERES_CONST float inf = 1.0e14; + + template + class assign_crowd { + public: + std::vector >& _fronts; + + ~assign_crowd() { } + assign_crowd(std::vector >& fronts) : + _fronts(fronts) {} + assign_crowd(const assign_crowd& ev) : _fronts(ev._fronts) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) + _assign_crowd(_fronts[i]); + } + protected: + typedef typename std::vector::iterator it_t; + typedef typename std::vector::const_iterator cit_t; + + void _fmin_max(const std::vector& f, + std::vector& fmin, + std::vector& fmax) const { + assert(f.size()); + size_t nb_objs = f[0]->fit().objs().size(); + assert(nb_objs); + fmin.resize(nb_objs); + fmax.resize(nb_objs); + for (unsigned i = 0; i < nb_objs; ++i) { + float mi = std::numeric_limits::max(); + float ma = -std::numeric_limits::max(); + for (cit_t it = f.begin(); it != f.end(); ++it) { + float o = (*it)->fit().obj(i); + assert(!std::isnan(o)); + assert(!std::isnan(i)); + assert(!std::isinf(o)); + assert(!std::isinf(i)); + + if (o < mi) + mi = o; + if (o > ma) + ma = o; + } + fmin[i] = mi; + fmax[i] = ma; + assert(fmin[i] <= fmax[i]); + } + } + + /// Deb, p248 + /// /!\ end not included (like any stl algo) + void _assign_crowd(std::vector& f) const { + +#ifndef NDEBUG + BOOST_FOREACH(Indiv& ind, f) + for (size_t i = 0; i < ind->fit().objs().size(); ++i) { + assert(!std::isnan(ind->fit().obj(i))); + } +#endif + + + if (f.size() == 1) { + f[0]->set_crowd(crowd::inf); + return; + } + if (f.size() == 2) { + f[0]->set_crowd(crowd::inf); + f[1]->set_crowd(crowd::inf); + } + + size_t nb_objs = f[0]->fit().objs().size(); + + // C1 + BOOST_FOREACH(Indiv& i, f) + i->set_crowd(0.0f); + + std::vector fmin, fmax; + _fmin_max(f, fmin, fmax); + + // C2 + C3 + // for each obj + for (size_t i = 0; i < nb_objs; ++i) { + // sort in order of f_m (best first) + parallel::sort(f.begin(), f.end(), fit::compare_obj(i)); + assert(!std::isnan(f[0]->fit().obj(i))); + assert(!std::isinf(f[0]->fit().obj(i))); + assert(!std::isnan(f[1]->fit().obj(i))); + assert(!std::isinf(f[1]->fit().obj(i))); + assert(f[0]->fit().obj(i) >= f[1]->fit().obj(i)); + + // assign + f[0]->set_crowd(crowd::inf); + f[f.size() - 1]->set_crowd(crowd::inf); + + for (it_t it = f.begin() + 1; it != f.end() - 1; ++it) { + assert(i < fmin.size()); + assert(i < fmax.size()); + assert(!std::isnan(fmax[i])); + assert(!std::isinf(fmax[i])); + assert(!std::isnan(fmin[i])); + assert(!std::isinf(fmin[i])); + float f = (*(it - 1))->fit().obj(i) - (*(it + 1))->fit().obj(i); + assert(fmax[i] - fmin[i] >= 0); + assert(f >= 0); + if (fmax[i] - fmin[i] != 0) + f /= fmax[i] - fmin[i]; + else + f = 0.0f; + assert(!std::isnan(f)); + assert(!std::isinf(f)); + assert(f >= 0); + (*it)->set_crowd((*it)->crowd() + f); + } + } + } + + }; + + struct compare_crowd { + template + bool operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + return i1->crowd() > i2->crowd(); + } + }; + + struct compare_ranks { + template + bool operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + return i1->rank() < i2->rank(); + } + }; + + + // a special indiv to add rank & crowd + template + class Indiv : public Phen { + public: + int rank() const { + return _rank; + } + float crowd() const { + return _crowd; + } + void set_rank(int r) { + _rank = r; + } + void set_crowd(float d) { + _crowd = d; + } + Indiv(const Phen& p) : 
Phen(p) {} + Indiv() {} + // overriding ! (not a redefinition of a virtual) + void cross(const boost::shared_ptr& i2, + boost::shared_ptr& o1, + boost::shared_ptr& o2) { + assert(i2); + if (!o1) + o1 = boost::shared_ptr(new Indiv()); + if (!o2) + o2 = boost::shared_ptr(new Indiv()); + this->_gen.cross(i2->gen(), o1->gen(), o2->gen()); +#ifdef TRACK_FIT +#warning track fit is enabled + o1->fit() = this->fit(); + o2->fit() = i2->fit(); +#endif + } + protected: + // rank + int _rank; + // crowding distance + float _crowd; + }; + + } + } +} + +#endif /* !CROWD_H_ */ diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort.hpp new file mode 100644 index 000000000..d4312a8db --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort.hpp @@ -0,0 +1,229 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef DOM_SORT_HPP +#define DOM_SORT_HPP + +#include +//#warning NEW algorithm for NSGA-2 (2 objectives)-> define SFERES_FAST_DOMSORT ! 
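crowd::assign_crowd above implements the NSGA-II crowding distance (Deb et al., 2002): within one front, the boundary solutions on each objective receive an infinite distance and the interior ones accumulate the normalized gap between their two neighbours. A standalone sketch on plain structs follows; Point is an illustrative stand-in, not a sferes type.

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <vector>

    struct Point {
      std::vector<double> objs;   // objective values (maximized)
      double crowd = 0.0;
    };

    void assign_crowd(std::vector<Point>& front) {
      const double inf = std::numeric_limits<double>::infinity();
      if (front.size() <= 2) {                     // every point is a boundary point
        for (Point& p : front) p.crowd = inf;
        return;
      }
      const std::size_t nb_objs = front[0].objs.size();
      for (std::size_t k = 0; k < nb_objs; ++k) {
        // sort by objective k, best first (as fit::compare_obj does)
        std::sort(front.begin(), front.end(),
                  [k](const Point& a, const Point& b) { return a.objs[k] > b.objs[k]; });
        front.front().crowd = inf;                 // extremes are always kept
        front.back().crowd = inf;
        const double span = front.front().objs[k] - front.back().objs[k];
        for (std::size_t i = 1; i + 1 < front.size(); ++i) {
          const double gap = front[i - 1].objs[k] - front[i + 1].objs[k];
          front[i].crowd += (span != 0.0) ? gap / span : 0.0;
        }
      }
    }

Larger values mean emptier neighbourhoods, which is why compare_crowd above prefers individuals with a greater crowding distance.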
+namespace sferes { + namespace ea { + namespace _dom_sort { + template + struct count_dom { + const std::vector& pop; + std::vector& n; + std::vector >& s; + std::vector& r; + + ~count_dom() { } + count_dom(const std::vector& pop_, + std::vector& n_, + std::vector >& s_, + std::vector& r_) : + pop(pop_), n(n_), s(s_), r(r_) { + } + count_dom(const count_dom& ev) : + pop(ev.pop), n(ev.n), s(ev.s), r(ev.r) {} + void operator() (const parallel::range_t& range) const { + assert(n.size() == pop.size()); + assert(s.size() == pop.size()); + for (size_t p = range.begin(); p != range.end(); ++p) { + assert(s[p].empty()); + n[p] = 0; + for (size_t q = 0; q < pop.size(); ++q) { + int flag = fit::dominate_flag(pop[p], pop[q]); + if (flag > 0) + s[p].push_back(q); + else if (flag < 0) + ++n[p]; + } + if (n[p] == 0) + r[p] = 0; + } + } + }; + + + // cf deb's paper on NSGA-2 : + /// @article{deb2002nsga, + // title={{NSGA-II}}, + // author={Deb, K. and Pratap, A. and Agarwal, S. and Meyarivan, T. and Fast, A. and Algorithm, E.M.G.}, + // journal={IEEE transactions on evolutionary computation}, + // volume={6}, + // number={2}, + // year={2002} + // } + // this algorithm is in O(n^2) + template + inline void sort_deb(const std::vector& pop, + std::vector >& fronts, + std::vector& ranks) { + assert(!pop.empty()); + std::vector > s(pop.size()); + std::vector > f(1); + std::vector n(pop.size()); + ranks.resize(pop.size()); + + std::fill(ranks.begin(), ranks.end(), pop.size()); + +#ifndef NDEBUG + BOOST_FOREACH(const Indiv& ind, pop) + for (size_t i = 0; i < ind->fit().objs().size(); ++i) { + assert(!std::isnan(ind->fit().objs()[i])); + } +#endif + + parallel::p_for(parallel::range_t(0, pop.size()), + _dom_sort::count_dom(pop, n, s, ranks)); + + for (size_t i = 0; i < pop.size(); ++i) + if (ranks[i] == 0) + f[0].push_back(i); + +#ifndef NDEBUG + BOOST_FOREACH(size_t k, n) { + assert(k < pop.size()); + } + +#endif + assert(!f[0].empty()); + // second step : make layers + size_t i = 0; + while (!f[i].empty()) { + f.push_back(std::vector()); + for (size_t pp = 0; pp < f[i].size(); ++pp) { + size_t p = f[i][pp]; + for (size_t k = 0; k < s[p].size(); ++k) { + size_t q = s[p][k]; + assert(q != p); + assert(n[q] != 0); + --n[q]; + if (n[q] == 0) { + ranks[q] = i + 1; + f.back().push_back(q); + } + } + } + ++i; + assert(i < f.size()); + } + +#ifndef NDEBUG + size_t size = 0; + BOOST_FOREACH(std::vector& v, f) + size += v.size(); + assert(size == pop.size()); +#endif + + // copy indivs to the res + fronts.clear(); + fronts.resize(f.size()); + for (unsigned i = 0; i < f.size(); ++i) + for (unsigned j = 0; j < f[i].size(); ++j) + fronts[i].push_back(pop[f[i][j]]); + + assert(fronts.back().size() == 0); + fronts.pop_back(); + assert(!fronts.empty()); + assert(!fronts[0].empty()); + } + + template + inline std::vector new_vector(const T& t) { + std::vector v; + v.push_back(t); + return v; + } + + struct _comp_fronts { + // this functor is ONLY for dom_sort_fast2 + template + bool operator()(const T& f2, const T& f1) { + assert(f1.size() == 1); + assert(f1[0]->fit().objs().size() == 2); + // we only need to compare f1 to the value of the last element of f2 + if (f1[0]->fit().obj(1) < f2.back()->fit().obj(1)) + return true; + else + return false; + } + }; + + // see M. T. 
Jensen, 2003 + template + inline void sort_2objs(const std::vector& pop, + std::vector > & f, + std::vector& ranks) { + std::vector p = pop; + parallel::sort(p.begin(), p.end(), fit::compare_objs_lex()); + f.push_back(new_vector(p[0])); + size_t e = 0; + for (size_t i = 1; i < p.size(); ++i) { + if (p[i]->fit().obj(1) > f[e].back()->fit().obj(1)) { // !dominate(si, f_e) + typename std::vector >::iterator b = + std::lower_bound(f.begin(), f.end(), new_vector(p[i]), + _comp_fronts()); + assert(b != f.end()); + b->push_back(p[i]); + } else { + ++e; + f.push_back(new_vector(p[i])); + } + } + // assign ranks to follow the interface + for (size_t i = 0; i < f.size(); ++i) + for (size_t j = 0; j < f[i].size(); ++j) + f[i][j]->set_rank(i); + } + } + + template + inline void dom_sort(const std::vector& pop, + std::vector >& fronts, + std::vector& ranks) { +#ifdef SFERES_FAST_DOMSORT + if (pop[0]->fit().objs().size() == 2) + _dom_sort::sort_2objs(pop, fronts, ranks); + else +#endif + _dom_sort::sort_deb(pop, fronts, ranks); + } + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort_basic.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort_basic.hpp new file mode 100644 index 000000000..48c264fdb --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/dom_sort_basic.hpp @@ -0,0 +1,102 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
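sort_deb above is Deb's O(n^2) fast non-dominated sort: for every individual p it counts how many others dominate p and records whom p dominates, then peels off the rank-0, rank-1, ... layers. Below is a standalone sketch on raw fitness vectors under maximization, matching the convention of fit::dominate_flag; dominate_flag and fast_nondominated_sort here are illustrative stand-ins, not the sferes functions.

    #include <cstddef>
    #include <vector>

    // 1 if a dominates b, -1 if b dominates a, 0 otherwise (maximization).
    int dominate_flag(const std::vector<double>& a, const std::vector<double>& b) {
      bool f1 = false, f2 = false;
      for (std::size_t i = 0; i < a.size(); ++i) {
        if (a[i] > b[i]) f1 = true;
        else if (b[i] > a[i]) f2 = true;
      }
      if (f1 && !f2) return 1;
      if (!f1 && f2) return -1;
      return 0;
    }

    std::vector<std::vector<std::size_t> >
    fast_nondominated_sort(const std::vector<std::vector<double> >& pop,
                           std::vector<int>& rank) {
      const std::size_t n = pop.size();
      std::vector<int> ndom(n, 0);                          // how many dominate p
      std::vector<std::vector<std::size_t> > dominated(n);  // whom p dominates
      std::vector<std::vector<std::size_t> > fronts(1);
      rank.assign(n, -1);
      for (std::size_t p = 0; p < n; ++p)
        for (std::size_t q = 0; q < n; ++q) {
          const int flag = dominate_flag(pop[p], pop[q]);
          if (flag > 0) dominated[p].push_back(q);
          else if (flag < 0) ++ndom[p];
        }
      for (std::size_t p = 0; p < n; ++p)
        if (ndom[p] == 0) { rank[p] = 0; fronts[0].push_back(p); }
      // peel layer i+1 off the individuals dominated only by layers <= i
      for (std::size_t i = 0; !fronts[i].empty(); ++i) {
        fronts.push_back(std::vector<std::size_t>());
        for (std::size_t j = 0; j < fronts[i].size(); ++j)
          for (std::size_t k = 0; k < dominated[fronts[i][j]].size(); ++k) {
            const std::size_t q = dominated[fronts[i][j]][k];
            if (--ndom[q] == 0) { rank[q] = (int)i + 1; fronts[i + 1].push_back(q); }
          }
      }
      fronts.pop_back();                                    // the last layer is always empty
      return fronts;
    }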
+ + + + +#ifndef DOM_SORT_BASIC_HPP +#define DOM_SORT_BASIC_HPP + + +namespace sferes { + namespace ea { + namespace _dom_sort_basic { + struct non_dominated_f { + template + inline bool operator() (const Indiv& ind, const std::vector& pop) const { + + BOOST_FOREACH(Indiv i, pop) { + assert(i); + assert(ind); + if (fit::dominate(i, ind)) + return false; + } + return true; + } + }; + } + template + inline void dom_sort_basic(const std::vector& pop, + std::vector >& fronts, + const ND& nd, + std::vector& ranks) { + std::vector p(pop.size()); + for (size_t i = 0; i < p.size(); ++i) + p[i] = i; + ranks.resize(pop.size()); + int rank = 0; + while (!p.empty()) { + std::vector non_dominated; + std::vector non_dominated_ind; + std::vector tmp_pop; + for (size_t i = 0; i < p.size(); ++i) + tmp_pop.push_back(pop[p[i]]); + for (size_t i = 0; i < p.size(); ++i) + if (nd(pop[p[i]], tmp_pop)) { + non_dominated.push_back(p[i]); + ranks[p[i]] = rank; + non_dominated_ind.push_back(pop[p[i]]); + } + assert(non_dominated.size()); + std::vector np; + std::set_difference(p.begin(), p.end(), + non_dominated.begin(), non_dominated.end(), + std::back_insert_iterator >(np)); + assert(np.size() < p.size()); + p.swap(np); + fronts.push_back(non_dominated_ind); + ++rank; + } + } + + template + inline void dom_sort_basic(const std::vector& pop, + std::vector >& fronts, + std::vector& ranks) { + dom_sort_basic(pop, fronts, _dom_sort_basic::non_dominated_f(), ranks); + } + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/ea.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/ea.hpp new file mode 100644 index 000000000..1a9a45302 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/ea.hpp @@ -0,0 +1,281 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef EA_HPP_ +#define EA_HPP_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace sferes { + namespace ea { + + template + struct RefreshStat_f { + RefreshStat_f(const E &ea) : _ea(ea) { + } + const E& _ea; + template + void operator() (T & x) const { + x.refresh(_ea); + } + }; + template + struct WriteStat_f { + WriteStat_f(A & a) : _archive(a) { + } + A& _archive; + template + void operator() (const T &x) const { + std::string version(VERSION); + _archive << boost::serialization::make_nvp("version", + version); + _archive << BOOST_SERIALIZATION_NVP(x); + } + }; + + template + struct ReadStat_f { + ReadStat_f(A & a) : _archive(a) { + } + A& _archive; + template + void operator() (T & x) const { + std::string version; + _archive >> boost::serialization::make_nvp("version", version); + if (version != std::string(VERSION)) + std::cerr << "WARNING: your are loading a file made with sferes version " + << version << " while the current version is:" + << VERSION + << std::endl; + _archive >> BOOST_SERIALIZATION_NVP(x); + } + }; + + struct ShowStat_f { + ShowStat_f(unsigned n, std::ostream & os, size_t k) : + _n(n), _i(0), _os(os), _k(k) { + } + template + void operator() (T & x) const { + if (_i == _n) + x.show(_os, _k); + + ++_i; + } + int _n; + mutable int _i; + std::ostream& _os; + size_t _k; + }; + + + template + struct ApplyModifier_f { + ApplyModifier_f(E &ea) : _ea(ea) { + } + E& _ea; + template + void operator() (T & x) const { + x.apply(_ea); + } + }; + + template + class Ea : public stc::Any { + public: + typedef Phen phen_t; + typedef Eval eval_t; + typedef Stat stat_t; + typedef Params params_t; + typedef typename + boost::mpl::if_, + FitModifier, + boost::fusion::vector >::type modifier_t; + typedef std::vector > pop_t; + + Ea() : _pop(Params::pop::size), _gen(0) { + _make_res_dir(); + } + + void run() { + dbg::trace trace("ea", DBG_HERE); + random_pop(); + for (_gen = 0; _gen < Params::pop::nb_gen; ++_gen) { + epoch(); + update_stats(); + if (_gen % Params::pop::dump_period == 0) + _write(_gen); + } + } + void random_pop() { + dbg::trace trace("ea", DBG_HERE); + stc::exact(this)->random_pop(); + } + void epoch() { + dbg::trace trace("ea", DBG_HERE); + stc::exact(this)->epoch(); + } + const pop_t& pop() const { + return _pop; + }; + pop_t& pop() { + return _pop; + }; + const eval_t& eval() const { + return _eval; + } + eval_t& eval() { + return _eval; + } + const stat_t& stat() const { + return _stat; + } + const modifier_t& fit_modifier() const { + return _fit_modifier; + } + + // modifiers + void apply_modifier() { + boost::fusion::for_each(_fit_modifier, ApplyModifier_f(stc::exact(*this))); + } + + // stats + template + const typename boost::fusion::result_of::value_at_c::type& stat() const { + return boost::fusion::at_c(_stat); + } + void load(const std::string& fname) { + _load(fname); + } + void show_stat(unsigned i, std::ostream& os, size_t k = 0) { + boost::fusion::for_each(_stat, ShowStat_f(i, os, k)); + } + void update_stats() { + boost::fusion::for_each(_stat, RefreshStat_f(stc::exact(*this))); + } + + + const std::string& res_dir() const { + return _res_dir; + } + size_t gen() const { + return _gen; + } + bool dump_enabled() const { + return 
Params::pop::dump_period != -1; + } + void write() const { + _write(gen()); + } + void write(size_t g) const { + _write(g); + } + protected: + pop_t _pop; + eval_t _eval; + stat_t _stat; + modifier_t _fit_modifier; + std::string _res_dir; + size_t _gen; + + void _make_res_dir() { + if (Params::pop::dump_period == -1) + return; + + _res_dir = misc::hostname() + "_" + misc::date() + "_" + misc::getpid(); + boost::filesystem::path my_path(_res_dir); + boost::filesystem::create_directory(my_path); + } + void _write(int gen) const { + dbg::trace trace("ea", DBG_HERE); + if (Params::pop::dump_period == -1) + return; + std::string fname = _res_dir + std::string("/gen_") + + boost::lexical_cast(gen); + std::ofstream ofs(fname.c_str()); + typedef boost::archive::xml_oarchive oa_t; + oa_t oa(ofs); + boost::fusion::for_each(_stat, WriteStat_f(oa)); + std::cout << fname << " written" << std::endl; + } + void _load(const std::string& fname) { + dbg::trace trace("ea", DBG_HERE); + std::cout << "loading " << fname << std::endl; + std::ifstream ifs(fname.c_str()); + if (ifs.fail()) { + std::cerr << "Cannot open :" << fname + << "(does file exist ?)" << std::endl; + exit(1); + } + typedef boost::archive::xml_iarchive ia_t; + ia_t ia(ifs); + boost::fusion::for_each(_stat, ReadStat_f(ia)); + } + }; + } +} + +#define SFERES_EA(Class, Parent) \ + template \ + class Class : public Parent < Phen, Eval, Stat, FitModifier, Params, \ + typename stc::FindExact, Exact>::ret > + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/eps_moea.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/eps_moea.hpp new file mode 100644 index 000000000..5cdbc32b4 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/eps_moea.hpp @@ -0,0 +1,353 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef EPSMOEA_HPP_ +#define EPSMOEA_HPP_ + +#include +#include + +#include + +#include +#include +#include +#include +#include +namespace sferes { + namespace ea { + // param : eps (array) + // param : min_fit (array) + // param : grain + SFERES_EA(EpsMOEA, Ea) { + public: + void random_pop() { + parallel::init(); + this->_pop.resize(Params::pop::size); + parallel::p_for(parallel::range_t(0, this->_pop.size()), + random(this->_pop)); + this->_eval.eval(this->_pop, 0, this->_pop.size()); + + // create archive + add_to_archive(this->_pop.front()); + for (typename pop_t :: const_iterator it = this->_pop.begin(); + it != this->_pop.end(); ++it) + archive_acceptance(*it); + sync_archive(); + } + + void epoch() { + std::vector indivs; + + for (size_t i = 0; i < Params::pop::grain; ++i) { + indiv_t i1 = pop_selection(); + indiv_t i2 = archive_selection(); + indiv_t c1, c2; + i1->cross(i2, c1, c2); + indivs.push_back(c1); + indivs.push_back(c2); + } + parallel::p_for(parallel::range_t(0, indivs.size()), + mutate(indivs)); + + this->_eval.eval(indivs, 0, indivs.size()); + + BOOST_FOREACH(indiv_t i, indivs) + if (pop_acceptance(i)) + archive_acceptance(i); + sync_archive(); + } + const std::vector >& pareto_front() const { + return _pareto_front; + } + protected: + typedef boost::shared_ptr indiv_t; + typedef std::vector pop_t; + typedef std::pair > elite_t; + typedef std::list archive_t; + typedef std::vector > id_t; + + // elite + archive_t pop_e; + + // identification vectors + id_t id_b; + + pop_t _pareto_front; + + // keep pareto_front & elite synchronized (for stat reporting) + void sync_archive() { + _pareto_front.clear(); + BOOST_FOREACH(elite_t& i, pop_e) + _pareto_front.push_back(i.first); + } + + /// return a random + tournament individual in P + indiv_t pop_selection() { + + indiv_t i1 = this->_pop[misc::rand(this->_pop.size())]; + indiv_t i2 = this->_pop[misc::rand(this->_pop.size())]; + + int flag = check_dominance(i1, i2); + switch (flag) { + case 1: // a dom b + return i1; + case -1: + return i2; + case 0: + if (misc::flip_coin()) + return i1; + else + return i2; + } + assert(0); + return indiv_t(); + } + + /// return a random individual in E + indiv_t archive_selection() { + return misc::rand_in_list(pop_e)->first; + + } + + /// try to insert the offspring in population + /// return true if accepted + bool pop_acceptance(indiv_t ind) { + dbg::out(dbg::info, "epsmoea")<<"pop_acceptance :"< array; + int i = 0; + + for (typename pop_t :: const_iterator it = this->_pop.begin(); + it != this->_pop.end(); ++it) { + flag = check_dominance(ind, *it); + switch (flag) { + case 1: + array.push_back(i); + break; + case -1: + dbg::out(dbg::info, "epsmoea")<<"pop_acceptance -> rejected"<_pop.size()); + dbg::out(dbg::info, "epsmoea")<<"pop_acceptance, removing :" + <_pop[k]) + <<" array.size()="<_pop[k] = ind; + dbg::out(dbg::info, "epsmoea")<<"pop_acceptance -> accepted (k="< rejected"< the offspring (indiv) is eps-non-dominated + // if it isn't in any filled box, we add it to the archive + if (!same_box) { + add_to_archive(indiv); + return true; + } + assert(it != pop_e.end()); + // else, they are in the same box and we do a dominance check + int flag = check_dominance(ind.first, it->first); + float d1, d2; + switch (flag) { + case 1: + pop_e.erase(it); + add_to_archive(indiv); + return true; + case -1: + return false; + case 0: + //both 
are non-dominated, we select the closest to the B + //vector + // /!\ -> loss of a archived individual ! + d1 = dist_to_id(ind); + d2 = dist_to_id(*it); + if (d1 <= d2) { + pop_e.erase(it); + add_to_archive(indiv); + return true; + } else + return false; + default: + assert(0); + } + assert(0); + return false; + } + + /// check dominance using the identification vector + /// returns the following: + /// * 1 if a dominates b + /// * 2 if b dominates a + /// * 3 if a and b are non-dominated and a!=b (identification arrays unequal) + /// * 4 if a and b are non-dominated and a=b + int check_box_dominance(const elite_t &a, const elite_t &b) const { + int flag1 = 0, flag2 = 0; + + for (unsigned i = 0; i < Params::pop::eps_size(); ++i) + if (a.second[i] > b.second[i]) + flag1 = 1; + else if (b.second[i] > a.second[i]) + flag2 = 1; + + // a dominates b + if (flag1 && !flag2) + return 1; + // b dominates a + if (!flag1 && flag2) + return 2; + // a and b are non-dominated and a!=b (identification arrays unequal) + if (flag1 && flag2) + return 3; + // a and b are non-dominated and a=b + assert(!flag1 && !flag2); + return 4; + } + + /// standard dominance + /// * 1 if a dominates b + /// * -1 if b dominates a + /// * 0 if both a and b are non-dominated + int check_dominance(const indiv_t a, const indiv_t b) const { + assert(a->fit().objs().size() == b->fit().objs().size()); + assert(a->fit().objs().size()); + size_t nb_objs = a->fit().objs().size(); + int flag1 = 0, flag2 = 0; + for (size_t i = 0; i < nb_objs; ++i) + if (a->fit().obj(i) > b->fit().obj(i)) + flag1 = 1; + else if (a->fit().obj(i) < b->fit().obj(i)) + flag2 = 1; + + if (flag1 && !flag2) + return 1; + + if (!flag1 && flag2) + return -1; + + return 0; + } + + /// compute an identification vector and return it + elite_t make_identification_vector(indiv_t indiv) const { + elite_t e; + e.first = indiv; + e.second.resize(Params::pop::eps_size()); + dbg::out(dbg::info, "epsmoea")<<"eps_size="<fit().objs().size() + <fit().objs().size()); + for (size_t i = 0; i < Params::pop::eps_size(); ++i) + e.second[i] = + ceil((indiv->fit().obj(i) - Params::pop::min_fit(i)) / Params::pop::eps(i)); + return e; + } + + /// compute a squared euclidean distance between a's fitness and + /// a's identification vector + float dist_to_id(const elite_t& a) const { + float res = 0; + assert(a.first->fit().objs().size() == a.second.size()); + for (unsigned i = 0; i < a.first->fit().objs().size(); ++i) + res += powf(a.first->fit().obj(i) - a.second[i], 2.0); + return res; + } + + /// make the identification vector and add to the archive / elite + /// list + void add_to_archive(indiv_t indiv) { + dbg::out(dbg::info, "epsmoea")<<"add_to_archive :"<fit().objs().size(); ++i) + s += boost::lexical_cast(indiv->fit().obj(i)) + " "; + return s; + } + + }; + } +} +#endif + + diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/nsga2.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/nsga2.hpp new file mode 100644 index 000000000..738302155 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/nsga2.hpp @@ -0,0 +1,275 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. 
+//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef NSGA2_HPP_ +#define NSGA2_HPP_ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace sferes { + namespace ea { + // Main class + SFERES_EA(Nsga2, Ea) { + public: + typedef boost::shared_ptr > indiv_t; + typedef typename std::vector pop_t; + typedef typename pop_t::iterator it_t; + typedef typename std::vector > front_t; + + void random_pop() { + parallel::init(); + + _parent_pop.resize(Params::pop::size); + assert(Params::pop::size % 4 == 0); + + pop_t init_pop((size_t)(Params::pop::size * Params::pop::initial_aleat)); + parallel::p_for(parallel::range_t(0, init_pop.size()), + random >(init_pop)); + _eval_pop(init_pop); + _apply_modifier(init_pop); + front_t fronts; + _rank_crowd(init_pop, fronts); + _fill_nondominated_sort(init_pop, _parent_pop); + } + + void epoch() { + this->_pop.clear(); + _pareto_front.clear(); + _selection (_parent_pop, _child_pop); + parallel::p_for(parallel::range_t(0, _child_pop.size()), + mutate >(_child_pop)); +#ifndef EA_EVAL_ALL + _eval_pop(_child_pop); + _merge(_parent_pop, _child_pop, _mixed_pop); +#else + _merge(_parent_pop, _child_pop, _mixed_pop); + _eval_pop(_mixed_pop); +#endif + _apply_modifier(_mixed_pop); +#ifndef NDEBUG + BOOST_FOREACH(indiv_t& ind, _mixed_pop) + for (size_t i = 0; i < ind->fit().objs().size(); ++i) { + assert(!std::isnan(ind->fit().objs()[i])); + } +#endif + _fill_nondominated_sort(_mixed_pop, _parent_pop); + _mixed_pop.clear(); + _child_pop.clear(); + + _convert_pop(_parent_pop, this->_pop); + + assert(_parent_pop.size() == Params::pop::size); + assert(_pareto_front.size() <= Params::pop::size * 2); + assert(_mixed_pop.size() == 0); + // assert(_child_pop.size() == 0); + assert(this->_pop.size() == Params::pop::size); + } + const std::vector >& pareto_front() const { + return _pareto_front; + } + const pop_t& mixed_pop() { + return _mixed_pop; + } + const pop_t& parent_pop() { + return _parent_pop; + } + const pop_t& child_pop() { + return _child_pop; + } + 
protected: + + std::vector > _pareto_front; + + pop_t _parent_pop; + pop_t _child_pop; + pop_t _mixed_pop; + + void _update_pareto_front(const front_t& fronts) { + _convert_pop(fronts.front(), _pareto_front); + } + + void _convert_pop(const pop_t& pop1, + std::vector > & pop2) { + pop2.resize(pop1.size()); + for (size_t i = 0; i < pop1.size(); ++i) + pop2[i] = pop1[i]; + } + + void _eval_pop(pop_t& pop) { + this->_eval.eval(pop, 0, pop.size()); + } + + void _apply_modifier(pop_t& pop) { + _convert_pop(pop, this->_pop); + this->apply_modifier(); + } + void _fill_nondominated_sort(pop_t& mixed_pop, pop_t& new_pop) { + assert(mixed_pop.size()); + front_t fronts; +#ifndef NDEBUG + BOOST_FOREACH(indiv_t& ind, mixed_pop) + for (size_t i = 0; i < ind->fit().objs().size(); ++i) { + assert(!std::isnan(ind->fit().objs()[i])); + } +#endif + _rank_crowd(mixed_pop, fronts); + new_pop.clear(); + + // fill the i first layers + size_t i; + for (i = 0; i < fronts.size(); ++i) + if (fronts[i].size() + new_pop.size() < Params::pop::size) + new_pop.insert(new_pop.end(), fronts[i].begin(), fronts[i].end()); + else + break; + + size_t size = Params::pop::size - new_pop.size(); + // sort the last layer + if (new_pop.size() < Params::pop::size) { + std::sort(fronts[i].begin(), fronts[i].end(), crowd::compare_crowd()); + for (size_t k = 0; k < size ; ++k) { + assert(i < fronts.size()); + new_pop.push_back(fronts[i][k]); + } + } + assert(new_pop.size() == Params::pop::size); + } + + // + void _merge(const pop_t& pop1, const pop_t& pop2, pop_t& pop3) { + assert(pop1.size()); + assert(pop2.size()); + pop3.clear(); + pop3.insert(pop3.end(), pop1.begin(), pop1.end()); + pop3.insert(pop3.end(), pop2.begin(), pop2.end()); + assert(pop3.size() == pop1.size() + pop2.size()); + } + + // --- tournament selection --- + void _selection(pop_t& old_pop, pop_t& new_pop) { + new_pop.resize(old_pop.size()); + std::vector a1, a2; + misc::rand_ind(a1, old_pop.size()); + misc::rand_ind(a2, old_pop.size()); + // todo : this loop could be parallelized + for (size_t i = 0; i < old_pop.size(); i += 4) { + const indiv_t& p1 = _tournament(old_pop[a1[i]], old_pop[a1[i + 1]]); + const indiv_t& p2 = _tournament(old_pop[a1[i + 2]], old_pop[a1[i + 3]]); + const indiv_t& p3 = _tournament(old_pop[a2[i]], old_pop[a2[i + 1]]); + const indiv_t& p4 = _tournament(old_pop[a2[i + 2]], old_pop[a2[i + 3]]); + assert(i + 3 < new_pop.size()); + p1->cross(p2, new_pop[i], new_pop[i + 1]); + p3->cross(p4, new_pop[i + 2], new_pop[i + 3]); + } + } + + const indiv_t& _tournament(const indiv_t& i1, const indiv_t& i2) { + // if (i1->rank() < i2->rank()) + // return i1; + // else if (i2->rank() > i1->rank()) + // return i2; + // else if (misc::flip_coin()) + // return i1; + // else + // return i2; + + int flag = fit::dominate_flag(i1, i2); + if (flag == 1) + return i1; + if (flag == -1) + return i2; + if (i1->crowd() > i2->crowd()) + return i1; + if (i1->crowd() < i2->crowd()) + return i2; + if (misc::flip_coin()) + return i1; + else + return i2; + } + + // --- rank & crowd --- + + void _rank_crowd(pop_t& pop, front_t& fronts) { + std::vector ranks; +#ifndef NDEBUG + BOOST_FOREACH(indiv_t& ind, pop) + for (size_t i = 0; i < ind->fit().objs().size(); ++i) { + assert(!std::isnan(ind->fit().objs()[i])); + } +#endif + dom_sort(pop, fronts, ranks); + _update_pareto_front(fronts); + parallel::p_for(parallel::range_t(0, fronts.size()), + crowd::assign_crowd(fronts)); + + for (size_t i = 0; i < ranks.size(); ++i) + pop[i]->set_rank(ranks[i]); + parallel::sort(pop.begin(), 
pop.end(), crowd::compare_ranks());; + } + + void _assign_rank(pop_t& pop) { + int rank = 0; + fit::compare_pareto comp; + assert(pop.size()); + std::sort(pop.begin(), pop.end(), comp); + pop[0]->set_rank(0); + for (unsigned i = 1; i < pop.size(); ++i) { + assert(comp(pop[i-1], pop[i]) || comp.eq(pop[i -1], pop[i])); + if (comp(pop[i-1], pop[i])) + ++rank; + pop[i]->set_rank(rank); + } + } + + }; + } +} +#endif + + diff --git a/modules/dnns_easily_fooled/sferes/sferes/ea/rank_simple.hpp b/modules/dnns_easily_fooled/sferes/sferes/ea/rank_simple.hpp new file mode 100644 index 000000000..a4c1374fb --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/ea/rank_simple.hpp @@ -0,0 +1,96 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
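_fill_nondominated_sort in nsga2.hpp above performs the NSGA-II environmental selection: whole Pareto fronts are copied into the next parent population while they fit, and the first front that overflows is sorted by decreasing crowding distance so that only its least crowded members survive. A function-level sketch of that step under this reading; Ind and select_survivors are illustrative names, not sferes types.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Ind { int rank; double crowd; };   // stand-in for the crowd::Indiv wrapper

    std::vector<Ind> select_survivors(std::vector<std::vector<Ind> > fronts, std::size_t mu) {
      std::vector<Ind> next;
      std::size_t i = 0;
      // copy whole fronts while they fit
      for (; i < fronts.size() && next.size() + fronts[i].size() <= mu; ++i)
        next.insert(next.end(), fronts[i].begin(), fronts[i].end());
      // split the first front that does not fit, by decreasing crowding distance
      if (next.size() < mu && i < fronts.size()) {
        std::sort(fronts[i].begin(), fronts[i].end(),
                  [](const Ind& a, const Ind& b) { return a.crowd > b.crowd; });
        next.insert(next.end(), fronts[i].begin(),
                    fronts[i].begin() + (mu - next.size()));
      }
      return next;
    }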
+ + + + +#ifndef RANK_SIMPLE_HPP_ +#define RANK_SIMPLE_HPP_ + +#include +#include +#include +#include +#include + +namespace sferes { + namespace ea { + SFERES_EA(RankSimple, Ea) { + public: + SFERES_CONST unsigned nb_keep = (unsigned)(Params::pop::keep_rate * Params::pop::size); + + void random_pop() { + this->_pop.resize(Params::pop::size * Params::pop::initial_aleat); + BOOST_FOREACH(boost::shared_ptr& indiv, this->_pop) { + indiv = boost::shared_ptr(new Phen()); + indiv->random(); + } + this->_eval.eval(this->_pop, 0, this->_pop.size()); + this->apply_modifier(); + std::partial_sort(this->_pop.begin(), this->_pop.begin() + Params::pop::size, + this->_pop.end(), fit::compare()); + this->_pop.resize(Params::pop::size); + } + void epoch() { + assert(this->_pop.size()); + for (unsigned i = nb_keep; i < this->_pop.size(); i += 2) { + unsigned r1 = _random_rank(); + unsigned r2 = _random_rank(); + boost::shared_ptr i1, i2; + this->_pop[r1]->cross(this->_pop[r2], i1, i2); + i1->mutate(); + i2->mutate(); + this->_pop[i] = i1; + this->_pop[i + 1] = i2; + } +#ifndef EA_EVAL_ALL + this->_eval.eval(this->_pop, nb_keep, Params::pop::size); +#else + this->_eval.eval(this->_pop, 0, Params::pop::size); +#endif + this->apply_modifier(); + std::partial_sort(this->_pop.begin(), this->_pop.begin() + nb_keep, + this->_pop.end(), fit::compare()); + dbg::out(dbg::info, "ea")<<"best fitness: " << this->_pop[0]->fit().value() << std::endl; + } + protected: + unsigned _random_rank() { + static float kappa = pow(Params::pop::coeff, nb_keep + 1.0f) - 1.0f; + static float facteur = nb_keep / ::log(kappa + 1); + return (unsigned) (this->_pop.size() - facteur * log(misc::rand(1) * kappa + 1)); + } + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/eval/eval.hpp b/modules/dnns_easily_fooled/sferes/sferes/eval/eval.hpp new file mode 100644 index 000000000..23b32566a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/eval/eval.hpp @@ -0,0 +1,68 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef EVAL_HPP_ +#define EVAL_HPP_ + +#include +#include +#include +#include + +namespace sferes { + namespace eval { + SFERES_CLASS(Eval) { + public: + template + void eval(std::vector >& pop, size_t begin, size_t end) { + dbg::trace trace("eval", DBG_HERE); + assert(pop.size()); + assert(begin < pop.size()); + assert(end <= pop.size()); + for (size_t i = begin; i < end; ++i) { + pop[i]->develop(); + pop[i]->fit().eval(*pop[i]); + } + } + protected: + }; + } +} + +#define SFERES_EVAL SFERES_CLASS + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/eval/mpi.hpp b/modules/dnns_easily_fooled/sferes/sferes/eval/mpi.hpp new file mode 100644 index 000000000..1f66e2eea --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/eval/mpi.hpp @@ -0,0 +1,167 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
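Eval::eval above defines the sequential evaluation contract used by every EA in this module: develop() each individual, then let its fitness object score it, over a half-open sub-range so callers such as RankSimple can re-evaluate only the newly produced offspring. A tiny standalone sketch with plain stand-in types:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Indiv {
      double fitness = 0.0;
      void develop() { /* genotype -> phenotype */ }
      void eval()    { fitness = 1.0; /* stand-in objective */ }
    };

    template <typename P>
    void eval_range(std::vector<std::shared_ptr<P> >& pop, std::size_t begin, std::size_t end) {
      for (std::size_t i = begin; i < end; ++i) {
        pop[i]->develop();                 // build the phenotype first
        pop[i]->eval();                    // then score it (fit().eval(*pop[i]) in sferes)
      }
    }

    int main() {
      std::vector<std::shared_ptr<Indiv> > pop(8);
      for (std::size_t i = 0; i < pop.size(); ++i) pop[i] = std::make_shared<Indiv>();
      eval_range(pop, 2, pop.size());      // e.g. re-evaluate only the offspring slots
      return 0;
    }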
+ + + + +#ifndef EVAL_MPI_HPP_ +#define EVAL_MPI_HPP_ + +#include +#include + +//#ifndef BOOST_MPI_HAS_NOARG_INITIALIZATION +//#error MPI need arguments (we require a full MPI2 implementation) +//#endif + +#define MPI_INFO dbg::out(dbg::info, "mpi")<<"["<<_world->rank()<<"] " +namespace sferes { + + namespace eval { + SFERES_CLASS(Mpi) { + public: + Mpi() { + static char* argv[] = {(char*)"sferes2", 0x0}; + char** argv2 = (char**) malloc(sizeof(char*) * 2); + int argc = 1; + argv2[0] = argv[0]; + argv2[1] = argv[1]; + using namespace boost; + dbg::out(dbg::info, "mpi")<<"Initializing MPI..."<(new mpi::environment(argc, argv2, true)); + dbg::out(dbg::info, "mpi")<<"MPI initialized"<(new mpi::communicator()); + MPI_INFO << "communicator initialized"< + void eval(std::vector >& pop, + size_t begin, size_t end) { + dbg::trace("mpi", DBG_HERE); + if (_world->rank() == 0) + _master_loop(pop, begin, end); + else + _slave_loop(); + } + ~Mpi() { + MPI_INFO << "Finalizing MPI..."<rank() == 0) + for (size_t i = 1; i < _world->size(); ++i) + _world->send(i, _env->max_tag(), s); + _finalize(); + } + protected: + void _finalize() { + _world = boost::shared_ptr(); + dbg::out(dbg::info, "mpi")<<"MPI world destroyed"<(); + dbg::out(dbg::info, "mpi")<<"environment destroyed"< + void _master_loop(std::vector >& pop, + size_t begin, size_t end) { + dbg::trace("mpi", DBG_HERE); + size_t current = begin; + std::vector evaluated(pop.size()); + std::fill(evaluated.begin(), evaluated.end(), false); + // first round + for (size_t i = 1; i < _world->size() && current < end; ++i) { + MPI_INFO << "[master] [send-init...] ->" <send(i, current, pop[current]->gen()); + MPI_INFO << "[master] [send-init ok] ->" <" <send(s.source(), current, pop[current]->gen()); + MPI_INFO << "[master] [send ok] ->" < + boost::mpi::status _recv(std::vector& evaluated, + std::vector >& pop) { + dbg::trace("mpi", DBG_HERE); + using namespace boost::mpi; + status s = _world->probe(); + MPI_INFO << "[rcv...]" << getpid() << " tag=" << s.tag() << std::endl; + _world->recv(s.source(), s.tag(), pop[s.tag()]->fit()); + MPI_INFO << "[rcv ok]" << " tag=" << s.tag() << std::endl; + evaluated[s.tag()] = true; + return s; + } + template + void _slave_loop() { + dbg::trace("mpi", DBG_HERE); + while(true) { + Phen p; + boost::mpi::status s = _world->probe(); + if (s.tag() == _env->max_tag()) { + MPI_INFO << "[slave] Quit requested" << std::endl; + MPI_Finalize(); + exit(0); + } else { + MPI_INFO <<"[slave] [rcv...] 
[" << getpid()<< "]" << std::endl; + _world->recv(0, s.tag(), p.gen()); + MPI_INFO <<"[slave] [rcv ok] " << " tag="< +#include +#include + +namespace sferes { + + namespace eval { + template + struct _parallel_evaluate { + typedef std::vector > pop_t; + pop_t _pop; + + ~_parallel_evaluate() { } + _parallel_evaluate(pop_t& pop) : _pop(pop) {} + _parallel_evaluate(const _parallel_evaluate& ev) : _pop(ev._pop) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + assert(i < _pop.size()); + _pop[i]->develop(); + _pop[i]->fit().eval(*_pop[i]); + for (size_t j = 0; j < _pop[i]->fit().objs().size(); ++j) { + assert(!std::isnan(_pop[i]->fit().objs()[j])); + } + } + } + }; + + SFERES_CLASS(Parallel) { + public: + template + void eval(std::vector >& pop, size_t begin, size_t end) { + dbg::trace trace("eval", DBG_HERE); + assert(pop.size()); + assert(begin < pop.size()); + assert(end <= pop.size()); + parallel::init(); + parallel::p_for(parallel::range_t(begin, end), + _parallel_evaluate(pop)); + } + + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/eval/parallel_eval.hpp b/modules/dnns_easily_fooled/sferes/sferes/eval/parallel_eval.hpp new file mode 100644 index 000000000..2e5338646 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/eval/parallel_eval.hpp @@ -0,0 +1,83 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef EVAL_PARALLEL_EVAL_HPP_ +#define EVAL_PARALLEL_EVAL_HPP_ + +#include + +namespace sferes { + + namespace eval { + template + struct _parallel_eval { + typedef std::vector > pop_t; + pop_t _pop; + + ~_parallel_eval() { } + _parallel_eval(pop_t& pop) : _pop(pop) {} + _parallel_eval(const _parallel_eval& ev) : _pop(ev._pop) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + assert(i < _pop.size()); + _pop[i]->fit().eval(*_pop[i]); + } + } + }; + + // parallelize only the evaluation (not the development) + SFERES_CLASS(ParallelEval) { + public: + template + void eval(std::vector >& pop, size_t begin, size_t end) { + assert(pop.size()); + assert(begin < pop.size()); + assert(end <= pop.size()); + dbg::trace trace(DBG_HERE); + BOOST_FOREACH(boost::shared_ptr& p, pop) + p->develop(); + parallel::init(); + parallel::p_for(parallel::range_t(begin, end), + _parallel_eval(pop)); + } + + }; + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/fit/fit_simu.hpp b/modules/dnns_easily_fooled/sferes/sferes/fit/fit_simu.hpp new file mode 100644 index 000000000..686f22c43 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/fit/fit_simu.hpp @@ -0,0 +1,46 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef FIT_SIMU_HPP +#define FIT_SIMU_HPP + +namespace sferes { + namespace fit { + } +}; + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/fit/fitness.hpp b/modules/dnns_easily_fooled/sferes/sferes/fit/fitness.hpp new file mode 100644 index 000000000..db1c2abc2 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/fit/fitness.hpp @@ -0,0 +1,203 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef FITNESS_HPP_ +#define FITNESS_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SFERES_FITNESS SFERES_CLASS_D + +namespace sferes { + namespace fit { + + namespace mode { + enum mode_t { eval = 0, view, usr1, usr2, usr3, usr4, usr5 }; + } + SFERES_CLASS(Fitness) { + public: + Fitness() : _value(0), _mode(mode::eval) {} + float value() const { + return _value; + } + const std::vector& objs() const { + return _objs; + } + void add_obj() { + _objs.resize(_objs.size() + 1); + } + float obj(size_t i) const { + assert(i < _objs.size()); + assert(!std::isnan(_objs[i])); + return _objs[i]; + } + void set_obj(size_t i, float v) { + assert(i < _objs.size()); + assert(!std::isnan(v)); + _objs[i] = v; + } + template + void eval(Indiv& i) { + stc::exact(this)->eval(i); + } + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("fit", DBG_HERE); + ar & BOOST_SERIALIZATION_NVP(_value); + ar & BOOST_SERIALIZATION_NVP(_objs); + } + void set_mode(mode::mode_t m) { + _mode = m; + } + mode::mode_t mode() const { + return _mode; + } + protected: + float _value; + std::vector _objs; + mode::mode_t _mode; + }; + + struct compare { + template + bool operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + return i1->fit().value() > i2->fit().value(); + } + }; + + struct compare_obj { + compare_obj(unsigned i) : _i(i) {} + template + bool operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + assert(_i < i1->fit().objs().size()); + assert(_i < i2->fit().objs().size()); + assert(i1->fit().objs().size()); + assert(i2->fit().objs().size()); + return i1->fit().obj(_i) > i2->fit().obj(_i); + } + size_t _i; + }; + // lexical order + struct compare_objs_lex { + compare_objs_lex() {} + template + bool operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + assert(i1->fit().objs().size() == i2->fit().objs().size()); + assert(i1->fit().objs().size()); + assert(i2->fit().objs().size()); + for (size_t i = 0; i < i1->fit().objs().size(); ++i) + if (i1->fit().obj(i) > i2->fit().obj(i)) + return true; + else if (i1->fit().obj(i) < i2->fit().obj(i)) + return false; + return false; + } + + }; + + // returns : + // 1 if i1 dominates i2 + // -1 if i2 dominates i1 + // 0 if both a and b are non-dominated + template + inline int dominate_flag(const boost::shared_ptr i1, const boost::shared_ptr i2) { + assert(i1->fit().objs().size()); + assert(i2->fit().objs().size()); + if (i1->fit().objs().size() != i2->fit().objs().size()) + std::cout<fit().objs().size()<<" vs "<fit().objs().size()<fit().objs().size() == i2->fit().objs().size()); + + size_t nb_objs = i1->fit().objs().size(); + assert(nb_objs); + + bool flag1 = false, flag2 = false; + for (unsigned i = 0; i < nb_objs; ++i) { + float fi1 = i1->fit().obj(i); + float fi2 = i2->fit().obj(i); + if (fi1 > fi2) + flag1 = true; + else if (fi2 > fi1) + flag2 = true; + } + if (flag1 && !flag2) + return 1; + else if (!flag1 && flag2) + return -1; + else + return 0; + } + + // true if i1 dominate i2 + template + inline bool dominate(const boost::shared_ptr i1, const boost::shared_ptr i2) { + return (dominate_flag(i1, i2) == 1); + } + + + struct compare_pareto { + template + int operator()(const boost::shared_ptr i1, const boost::shared_ptr i2) const { + return dominate_flag(i1, i2); + } + template + bool eq(const boost::shared_ptr i1, const boost::shared_ptr i2) { + bool c1 = operator()(i1, i2); + bool c2 = operator()(i2, i1); + 
return !c1 && !c2; + } + }; + + SFERES_CLASS_D(FitDummy, Fitness) { + public: + template + void eval(Indiv& i) { + } + }; + } +} + + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/fit/fitness_simu.hpp b/modules/dnns_easily_fooled/sferes/sferes/fit/fitness_simu.hpp new file mode 100644 index 000000000..8b052a92b --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/fit/fitness_simu.hpp @@ -0,0 +1,269 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
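// --- Illustrative sketch (not part of the files added by this diff) ---
// fitness.hpp above defines the SFERES_FITNESS macro and the base Fitness class
// (with _value for a scalar fitness and _objs for multiple objectives). A user
// fitness is expected to derive through the macro and implement eval(); the names
// FitExample and Params below are placeholders, following the usual sferes2 pattern.
#include <sferes/fit/fitness.hpp>

SFERES_FITNESS(FitExample, sferes::fit::Fitness) {
public:
  template<typename Indiv>
  void eval(Indiv& ind) {
    // single-objective case: write the scalar fitness into _value
    float f = 0.0f;
    for (size_t i = 0; i < ind.size(); ++i)
      f += ind.data(i);            // e.g. maximise the sum of the evolved parameters
    this->_value = f;
    // a multi-objective fitness would instead fill this->_objs, which is what
    // dominate_flag() and the Pareto comparators above operate on
  }
};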
+ + + + +#ifndef FITNESS_SIMU_HPP +#define FITNESS_SIMU_HPP + +#include +#include +#include +#include + +#define SFERES_FITNESS_SIMU(Class, Parent) \ + template \ + class Class : public Parent, Exact>::ret> + + + +namespace sferes { + namespace fit { + namespace state { + enum state_t { not_started, running, end_exp, end_eval, fast_fw, eval_done }; + } + + template + class FitnessSimu : public stc::Any { + public: + typedef Simu simu_t; + FitnessSimu() : + _step(0), + _exp_step(0), + _nb_exps(0), + _value(0.0f), + _state(state::not_started), + _mode(mode::eval) { + } + + template + void eval(Phen& p) { + + dbg::out(dbg::info, "fit")<<"eval mode="<mode()<<" (mode view="<mode() == mode::view) + _simu.init_view(); + if (_state == state::eval_done) + return; + _state = state::not_started; + while (_state != state::end_eval) + _exp(p); + _state = state::eval_done; + } + template + int refresh(Phen& p) { + return stc::exact(this)->refresh(p); + } + template + void refresh_end_exp(Phen& p) { + dbg::trace t1("fit", DBG_HERE); + stc::exact(this)->refresh_end_exp(p); + } + template + void refresh_end_eval(Phen& p) { + dbg::trace t1("fit", DBG_HERE); + stc::exact(this)->refresh_end_eval(p); + } + template + void scheduler(Phen& p) { + dbg::trace t1("fit", DBG_HERE); + stc::exact(this)->scheduler(p); + } + + template + void new_exp(Phen& p) { + dbg::out(dbg::info, "fit")<<"new_exp, _step="<<_step< + void end_exp(Phen& p) { + dbg::out(dbg::info, "fit")<<"end_exp, _step="<<_step< + void end_eval(Phen& p) { + assert(_state == state::end_exp); + dbg::out(dbg::info, "fit")<<"end_eval, _step="<<_step<& objs() const { + assert(!_objs.empty()); + return _objs; + } + float obj(size_t i) const { + assert(i < _objs.size()); + return _objs[i]; + } + + template + void serialize(Archive & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(_value); + ar & BOOST_SERIALIZATION_NVP(_objs); + } + + void set_mode(mode::mode_t m) { + _mode = m; + } + mode::mode_t mode() const { + return _mode; + } + + protected: + Simu _simu; + Agent _agent; + int _step; + size_t _exp_step; + size_t _nb_exps; + std::vector _objs; + float _value; + state::state_t _state; + mode::mode_t _mode; + + template + void _exp(Phen& p) { + dbg::out(dbg::tracing, "fit")<<"starting _step = " + <<_step<<" state="<<_state< + void _goto_next_exp(Phen& p) { + dbg::trace t1("fit", DBG_HERE); + dbg::out(dbg::tracing, "fit")<<"exp stopped, _step = "<<_step<<" state="<<_state<_state == ::sferes::fit::state::running \ + && this->_step == K) + +#define EVERY(K, E) assert(K != 0); \ + if (this->_state == ::sferes::fit::state::running \ + && ((this->_step + K) % E) == 0) + +#define SFERES_SCHEDULER() \ + template \ + void scheduler(Phen& p) + + +#define NEW_EXP(K) { if (this->_step == K) this->new_exp(p); } +#define END_EXP(K) { if (this->_step == K) this->end_exp(p); } +#define END_EVAL(K) { if (this->_step == K) this->end_eval(p); } + + SFERES_FITNESS_SIMU(FitnessSimuDummy, FitnessSimu) { + public: + template + int refresh(Phen& p) { + return 0; + } + template + void refresh_end_exp(Phen& p) { } + template + void refresh_end_eval(Phen& p) {} + SFERES_SCHEDULER() { + NEW_EXP(0); + END_EXP(1); + END_EVAL(1); + } + }; + + } +}; + + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/gen/bit_string.hpp b/modules/dnns_easily_fooled/sferes/sferes/gen/bit_string.hpp new file mode 100644 index 000000000..b67934abd --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/gen/bit_string.hpp @@ -0,0 +1,177 @@ +//| This file is a part of the 
sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef BITSTRING_HPP_ +#define BITSTRING_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace boost { + namespace serialization { + template + void save(Archive& ar, const std::bitset& bs, const unsigned int version) { + std::string s = bs.to_string(); + ar << BOOST_SERIALIZATION_NVP(s); + } + + template + void load(Archive& ar, std::bitset& bs, const unsigned int version) { + std::string s; + ar >> BOOST_SERIALIZATION_NVP(s); + assert(s.size() == bs.size()); + bs = 0; + for (size_t i = 0; i < Nb; ++i) + bs[Nb - i - 1] = (s[i] == '1'); + } + template + void serialize(Archive& ar, std::bitset& bs, const unsigned int version) { + boost::serialization::split_free(ar, bs, version); + } + } +} + +namespace sferes { + namespace gen { + namespace _bitstring { + template + struct _pow { + SFERES_CONST double result = K * _pow::result; + }; + template + struct _pow { + SFERES_CONST double result = K; + }; + + } + /// in range [0;1] + template + class BitString : public stc::Any { + public: + typedef Params params_t; + typedef BitString this_t; + typedef std::bitset bs_t; + SFERES_CONST double bs_max = _bitstring::_pow<2, Params::bit_string::nb_bits>::result - 1; + + BitString() : _data(Size) { + } + + //@{ + void mutate() { + BOOST_FOREACH(bs_t & b, _data) + if (misc::rand() < Params::bit_string::mutation_rate) + for (size_t i = 0; i < b.size(); ++i) + if (misc::rand() < Params::bit_string::mutation_rate_bit) + b[i].flip(); + } + // 1-point cross-over + void cross(const BitString& o, BitString& c1, BitString& c2) { + assert(Size == _data.size()); + assert(c1._data.size() == _data.size()); + assert(c2._data.size() == _data.size()); + + for (size_t i = 0; i < c1._data.size(); ++i) { + size_t k = misc::rand(c1._data.size()); + for (size_t j = 0; j < c1._data[i].size(); ++j) + if (j < k) { + c1._data[i][j] = _data[i][j]; + c2._data[i][j] = o._data[i][j]; + } else { + c1._data[i][j] = o._data[i][j]; + c2._data[i][j] = _data[i][j]; + } + } + } + void random() { + BOOST_FOREACH(bs_t & v, _data) + for (size_t i = 0; i < v.size(); ++i) + v[i] = (int) misc::flip_coin(); + } + //@} + + //@{ + float data(size_t i) const { + assert(bs_max != 0); + assert(i < _data.size()); + return _to_double(_data[i]) / bs_max; + } + unsigned long int_data(size_t i) const { + assert(i < _data.size()); + return _data[i].to_ulong(); + } + + bs_t bs_data(size_t i) const { + assert(i < _data.size()); + return _data[i]; + } + + size_t size() const { + return Size; + } + //@} + + template + void serialize(Archive& ar, const unsigned int version) { + ar& BOOST_SERIALIZATION_NVP(_data); + } + protected: + template + double _to_double(const std::bitset& d) const { + double x = 0; + size_t k = 1; + for (size_t i = 0; i < N; ++i) { + x += d[i] * k; + k *= 2; + } + return x; + } + std::vector _data; + }; + + } // gen +} // sferes + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/gen/cmaes.hpp b/modules/dnns_easily_fooled/sferes/sferes/gen/cmaes.hpp new file mode 100644 index 000000000..916f456a7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/gen/cmaes.hpp @@ -0,0 +1,134 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. 
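// --- Illustrative sketch (standalone, not part of the files added by this diff) ---
// BitString<Size, Params>::data(i) above decodes a std::bitset of nb_bits bits into
// a float in [0;1] by dividing its integer value by bs_max = 2^nb_bits - 1. The same
// arithmetic outside the framework, assuming nb_bits = 8 for the example:
#include <bitset>
#include <cassert>
#include <cstddef>
#include <iostream>

int main() {
  const size_t nb_bits = 8;
  const double bs_max = (1u << nb_bits) - 1;   // 2^nb_bits - 1 = 255
  std::bitset<nb_bits> b(128);                 // only the most significant bit set
  double x = b.to_ulong() / bs_max;            // 128 / 255 ~ 0.502, always in [0;1]
  assert(x >= 0.0 && x <= 1.0);
  std::cout << x << std::endl;
  return 0;
}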
+//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef GEN_CMAES_HPP_ +#define GEN_CMAES_HPP_ + +#ifdef EIGEN3_ENABLED + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + + +namespace sferes { + namespace gen { + // this class requires EIGEN3 (libEIGEN3-dev) + // REFERENCE: + // Hansen, N. and S. Kern (2004). Evaluating the CMA Evolution + // Strategy on Multimodal Test Functions. Eighth International + // Conference on Parallel Problem Solving from Nature PPSN VIII, + // Proceedings, pp. 282-291, Berlin: Springer. 
+ // (http://www.bionik.tu-berlin.de/user/niko/ppsn2004hansenkern.pdf) + template + class Cmaes : public stc::Any { + public: + typedef Params params_t; + typedef Cmaes this_t; + typedef Eigen::Matrix vector_t; + typedef Eigen::Matrix matrix_t; + SFERES_CONST size_t es_size = Size; + Cmaes() : _arx(vector_t::Zero()) { } + + void random() { + } + void mutate(const vector_t& xmean, + float sigma, + const matrix_t& B, + const matrix_t& D) { + for (size_t i = 0; i < Size; ++i) + _arz[i] = misc::gaussian_rand(); + _arx = xmean + sigma * (B * D * _arz); + } + + float data(size_t i) const { + assert(i < _arx.size()); + return _arx[i]; + } + const vector_t& data() const { + return _arx; + } + const vector_t& arx() const { + return _arx; + } + const vector_t& arz() const { + return _arz; + } + + size_t size() const { + return Size; + } + + template + void save(Archive& a, const unsigned version) const { + std::vector v(Size); + for (size_t i = 0; i < Size; ++i) + v[i] = _arx[i]; + a & BOOST_SERIALIZATION_NVP(v); + } + template + void load(Archive& a, const unsigned version) { + std::vector v; + a & BOOST_SERIALIZATION_NVP(v); + assert(v.size() == Size); + for (size_t i = 0; i < Size; ++i) + _arx[i] = v[i]; + } + BOOST_SERIALIZATION_SPLIT_MEMBER(); + protected: + vector_t _arx, _arz; + }; + } // gen +} // sferes + +#else +#warning Eigen3 is disabled -> no CMAES +#endif + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/gen/evo_float.hpp b/modules/dnns_easily_fooled/sferes/sferes/gen/evo_float.hpp new file mode 100644 index 000000000..bc7a7e466 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/gen/evo_float.hpp @@ -0,0 +1,245 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
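// --- Illustrative sketch (standalone, assumes Eigen3 is available; not part of this diff) ---
// Cmaes<Size, Params>::mutate() above draws z ~ N(0, I) and samples a candidate as
// x = xmean + sigma * (B * D * z), where B holds the eigenvectors of the covariance
// matrix and D the square roots of its eigenvalues. The same sampling step with
// plain Eigen types, using an identity covariance for simplicity:
#include <Eigen/Core>
#include <cmath>
#include <cstdlib>

int main() {
  const int n = 4;
  Eigen::VectorXf xmean = Eigen::VectorXf::Zero(n);
  Eigen::MatrixXf B = Eigen::MatrixXf::Identity(n, n);  // eigenvectors of the covariance
  Eigen::MatrixXf D = Eigen::MatrixXf::Identity(n, n);  // sqrt of its eigenvalues (diagonal)
  const float sigma = 0.5f;                              // global step size

  Eigen::VectorXf z(n);                                  // z ~ N(0, I), one draw per gene
  for (int i = 0; i < n; ++i) {
    // crude Box-Muller draw, standing in for misc::gaussian_rand()
    float u1 = (std::rand() + 1.0f) / (RAND_MAX + 2.0f);
    float u2 = (std::rand() + 1.0f) / (RAND_MAX + 2.0f);
    z[i] = std::sqrt(-2.0f * std::log(u1)) * std::cos(6.2831853f * u2);
  }
  Eigen::VectorXf x = xmean + sigma * (B * D * z);       // one sampled candidate (arx)
  return x.size() == n ? 0 : 1;
}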
+ + + + +#ifndef EVO_FLOAT_HPP_ +#define EVO_FLOAT_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace sferes { + namespace gen { + namespace evo_float { + enum mutation_t { polynomial = 0, gaussian, uniform }; + enum cross_over_t { recombination = 0, sbx, no_cross_over }; + + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + assert(0); + } + }; + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + assert(0); + } + }; + } + + /// in range [0;1] + template + class EvoFloat : + public Float, Exact>::ret> { + public: + typedef Params params_t; + typedef EvoFloat this_t; + + EvoFloat() {} + + //@{ + void mutate() { + for (size_t i = 0; i < Size; i++) + if (misc::rand() < Params::evo_float::mutation_rate) + _mutation_op(*this, i); + _check_invariant(); + } + void cross(const EvoFloat& o, EvoFloat& c1, EvoFloat& c2) { + if (Params::evo_float::cross_over_type != evo_float::no_cross_over && + misc::rand() < Params::evo_float::cross_rate) + _cross_over_op(*this, o, c1, c2); + else if (misc::flip_coin()) { + c1 = *this; + c2 = o; + } else { + c1 = o; + c2 = *this; + } + _check_invariant(); + } + void random() { + BOOST_FOREACH(float &v, this->_data) v = misc::rand(); + _check_invariant(); + } + //@} + + protected: + evo_float::Mutation_f _mutation_op; + evo_float::CrossOver_f _cross_over_op; + void _check_invariant() const { +#ifdef DBG_ENABLED + BOOST_FOREACH(float p, this->_data) { + assert(!std::isnan(p)); + assert(!std::isinf(p)); + assert(p >= 0 && p <= 1); + } +#endif + } + }; + + // partial specialization for operators + namespace evo_float { + // polynomial mutation. Cf Deb 2001, p 124 ; param: eta_m + // perturbation of the order O(1/eta_m) + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float eta_m = Ev::params_t::evo_float::eta_m; + assert(eta_m != -1.0f); + float ri = misc::rand(); + float delta_i = ri < 0.5 ? + pow(2.0 * ri, 1.0 / (eta_m + 1.0)) - 1.0 : + 1 - pow(2.0 * (1.0 - ri), 1.0 / (eta_m + 1.0)); + assert(!std::isnan(delta_i)); + assert(!std::isinf(delta_i)); + float f = ev.data(i) + delta_i; + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + + // gaussian mutation + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float sigma = Ev::params_t::evo_float::sigma; + float f = ev.data(i) + + misc::gaussian_rand(0, sigma * sigma); + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + // uniform mutation + template + struct Mutation_f { + void operator()(Ev& ev, size_t i) { + SFERES_CONST float max = Ev::params_t::evo_float::max; + float f = ev.data(i) + + misc::rand(max) - max / 2.0f; + ev.data(i, misc::put_in_range(f, 0.0f, 1.0f)); + } + }; + + // recombination + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + size_t k = misc::rand(f1.size()); + for (size_t i = 0; i < k; ++i) { + c1.data(i, f1.data(i)); + c2.data(i, f2.data(i)); + } + for (size_t i = k; i < f1.size(); ++i) { + c1.data(i, f2.data(i)); + c2.data(i, f1.data(i)); + } + } + }; + + // no cross-over + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &c1, Ev &c2) { + } + }; + + // SBX (cf Deb 2001, p 113) Simulated Binary Crossover + // suggested eta : 15 + /// WARNING : this code is from deb's code (different from the + // article ...) 
+ // A large value ef eta gives a higher probablitity for + // creating a `near-parent' solutions and a small value allows + // distant solutions to be selected as offspring. + template + struct CrossOver_f { + void operator()(const Ev& f1, const Ev& f2, Ev &child1, Ev &child2) { + SFERES_CONST float eta_c = Ev::params_t::evo_float::eta_c; + assert(eta_c != -1); + for (unsigned int i = 0; i < f1.size(); i++) { + float y1 = std::min(f1.data(i), f2.data(i)); + float y2 = std::max(f1.data(i), f2.data(i)); + SFERES_CONST float yl = 0.0; + SFERES_CONST float yu = 1.0; + if (fabs(y1 - y2) > std::numeric_limits::epsilon()) { + float rand = misc::rand(); + float beta = 1.0 + (2.0 * (y1 - yl) / (y2 - y1)); + float alpha = 2.0 - pow(beta, -(eta_c + 1.0)); + float betaq = 0; + if (rand <= (1.0 / alpha)) + betaq = pow((rand * alpha), (1.0 / (eta_c + 1.0))); + else + betaq = pow ((1.0 / (2.0 - rand * alpha)) , (1.0 / (eta_c + 1.0))); + float c1 = 0.5 * ((y1 + y2) - betaq * (y2 - y1)); + beta = 1.0 + (2.0 * (yu - y2) / (y2 - y1)); + alpha = 2.0 - pow(beta, -(eta_c + 1.0)); + if (rand <= (1.0 / alpha)) + betaq = pow ((rand * alpha), (1.0 / (eta_c + 1.0))); + else + betaq = pow ((1.0/(2.0 - rand * alpha)), (1.0 / (eta_c + 1.0))); + float c2 = 0.5 * ((y1 + y2) + betaq * (y2 - y1)); + + c1 = misc::put_in_range(c1, yl, yu); + c2 = misc::put_in_range(c2, yl, yu); + + assert(!std::isnan(c1)); + assert(!std::isnan(c2)); + + if (misc::flip_coin()) { + child1.data(i, c1); + child2.data(i, c2); + } else { + child1.data(i, c2); + child2.data(i, c1); + } + } + } + } + }; + + } //evo_float + } // gen +} // sferes + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/gen/float.hpp b/modules/dnns_easily_fooled/sferes/sferes/gen/float.hpp new file mode 100644 index 000000000..dbcc319d3 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/gen/float.hpp @@ -0,0 +1,107 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
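// --- Illustrative sketch (not part of the files added by this diff) ---
// EvoFloat reads all of its settings from a user-supplied Params structure
// (Params::evo_float::mutation_rate, mutation_type, eta_m, cross_rate,
// cross_over_type, eta_c, ...). A typical configuration selecting polynomial
// mutation and SBX cross-over might look as follows; the numeric values are
// placeholders, except eta_c = 15 which is the value suggested in the comment above.
#include <sferes/gen/evo_float.hpp>

struct Params {
  struct evo_float {
    SFERES_CONST float mutation_rate = 0.1f;   // per-gene probability of mutation
    SFERES_CONST float cross_rate = 0.5f;      // probability of applying the cross-over
    SFERES_CONST sferes::gen::evo_float::mutation_t mutation_type =
      sferes::gen::evo_float::polynomial;
    SFERES_CONST sferes::gen::evo_float::cross_over_t cross_over_type =
      sferes::gen::evo_float::sbx;
    SFERES_CONST float eta_m = 15.0f;          // polynomial mutation: perturbation ~ O(1/eta_m)
    SFERES_CONST float eta_c = 15.0f;          // SBX spread parameter
  };
};

// a genotype of 10 floats in [0;1] evolved with these settings
typedef sferes::gen::EvoFloat<10, Params> gen_t;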
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef FLOAT_HPP_ +#define FLOAT_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace sferes { + namespace gen { + // A basic class that represent an array of float, typically in range [0;1] + // it is used by CMAES and EvoFloat derives from this class + template + class Float : public stc::Any { + public: + typedef Params params_t; + typedef Float this_t; + SFERES_CONST size_t gen_size = Size; + + Float() : _data(Size) { + std::fill(_data.begin(), _data.end(), 0.5f); + } + + //@{ + void mutate() { + assert(0);//should not be used (use evo_float) + } + void cross(const Float& o, Float& c1, Float& c2) { + assert(0); // should not be used (use evo_float) + } + void random() { + assert(0); // should not be used (use evo_float) + } + //@} + + //@{ + float data(size_t i) const { + assert(this->_data.size()); + assert(i < this->_data.size()); + assert(!std::isinf(this->_data[i])); + assert(!std::isnan(this->_data[i])); + return this->_data[i]; + } + void data(size_t i, float v) { + assert(this->_data.size()); + assert(i < this->_data.size()); + assert(!std::isinf(v)); + assert(!std::isnan(v)); + this->_data[i] = v; + } + size_t size() const { + return Size; + } + //@} + template + void serialize(Archive & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(_data); + } + protected: + std::vector _data; + }; + } // gen +} // sferes + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/gen/sampled.hpp b/modules/dnns_easily_fooled/sferes/sferes/gen/sampled.hpp new file mode 100644 index 000000000..5bbe6d951 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/gen/sampled.hpp @@ -0,0 +1,153 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + +#ifndef GEN_SAMPLED_HPP_ +#define GEN_SAMPLED_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace sferes { + namespace gen { + template + class Sampled : public stc::Any { + public: + typedef Params params_t; + typedef Sampled this_t; + typedef typename Params::sampled::values_t values_t; + Sampled() : _data(Size) { + } + + //@{ + void mutate() { + if (Params::sampled::ordered) { + for (size_t i = 0; i < _data.size(); ++i) + if (misc::rand() < Params::sampled::mutation_rate) { + if (misc::flip_coin()) + _data[i] = std::max(0, (int)_data[i] - 1); + else + _data[i] = std::min((int)Params::sampled::values_size() - 1, + (int)_data[i] + 1); + } + } else { + BOOST_FOREACH(size_t & v, _data) + if (misc::rand() < Params::sampled::mutation_rate) + v = misc::rand(0, Params::sampled::values_size()); + _check_invariant(); + } + _check_invariant(); + } + + // 1-point cross-over + void cross(const Sampled& o, Sampled& c1, Sampled& c2) { + assert(c1._data.size()); + assert(c1._data.size() == c2._data.size()); + if (misc::rand() < Params::sampled::cross_rate) { + size_t k = misc::rand(c1._data.size()); + for (size_t j = 0; j < c1._data.size(); ++j) + if (j < k) { + c1._data[j] = _data[j]; + c2._data[j] = o._data[j]; + } else { + c1._data[j] = o._data[j]; + c2._data[j] = _data[j]; + } + } else { + c1 = *this; + c2 = o; + } + c1._check_invariant(); + c2._check_invariant(); + } + void random() { + BOOST_FOREACH(size_t & v, _data) + v = misc::rand(0, Params::sampled::values_size()); + _check_invariant(); + } + //@} + + //@{ + values_t data(size_t i) const { + assert(i < _data.size()); + assert(i >= 0); + _check_invariant(); + return Params::sampled::values(_data[i]); + } + size_t data_index(size_t i) const { + assert(i < _data.size()); + assert(i >= 0); + _check_invariant(); + return _data[i]; + } + void set_data(size_t pos, size_t k) { + assert(pos < _data.size()); + _data[pos] = k; + _check_invariant(); + } + size_t size() const { + return Size; + } + //@} + + template + void serialize(Archive& ar, const unsigned int version) { + ar& BOOST_SERIALIZATION_NVP(_data); + } + protected: + void _check_invariant() const { +#ifndef NDEBUG + for (size_t i = 0; i < _data.size(); ++i) { + assert(_data[i] >= 0); + assert(_data[i] < Params::sampled::values_size()); + } +#endif + } + std::vector _data; + }; + + } // gen +} // sferes + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/misc.hpp b/modules/dnns_easily_fooled/sferes/sferes/misc.hpp new file mode 100644 index 000000000..8e0de5cea --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/misc.hpp @@ -0,0 +1,44 @@ 
+//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef MISC_HPP_ +#define MISC_HPP_ + +#include "misc/rand.hpp" +#include "misc/range.hpp" +#include "misc/sys.hpp" +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/misc/rand.hpp b/modules/dnns_easily_fooled/sferes/sferes/misc/rand.hpp new file mode 100644 index 000000000..850b2804c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/misc/rand.hpp @@ -0,0 +1,122 @@ + + +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef RAND_HPP_ +#define RAND_HPP_ + +#include +#include +#include +#include +#include +#include + +// someday we will have a real thread-safe random number generator... +namespace sferes { + namespace misc { + // NOT Thread-safe ! + template + inline T rand(T max = 1.0) { + assert(max > 0); + T v; + do + v = T(((double)max * ::rand())/(RAND_MAX + 1.0)); + while(v >= max); // this strange case happened... precision problem? + assert(v < max); + return v; + } + + + template + inline T rand(T min, T max) { + assert(max != min); + assert(max > min); + T res = T(rand() * ((long int) max - (long int) min) + min); + assert(res >= min); + assert(res < max); + return res; + } + + template + inline T gaussian_rand(T m=0.0,T v=1.0) { + float facteur = sqrt(-2.0f * log(rand())); + float trigo = 2.0f * M_PI * rand(); + + return T(m + v * facteur * cos(trigo)); + + } + + inline void rand_ind(std::vector& a1, size_t size) { + a1.resize(size); + for (size_t i = 0; i < a1.size(); ++i) + a1[i] = i; + for (size_t i = 0; i < a1.size(); ++i) { + size_t k = rand(i, a1.size()); + assert(k < a1.size()); + boost::swap(a1[i], a1[k]); + } + } + + + /// return a random it in the list + template + inline typename std::list::iterator rand_in_list(std::list& l) { + int n = rand(l.size()); + typename std::list::iterator it = l.begin(); + for (int i = 0; i < n; ++i) + ++it; + return it; + } + + + inline bool flip_coin() { + return rand() < 0.5f; + } + + template + inline typename L::iterator rand_l(L& l) { + size_t k = rand(l.size()); + typename L::iterator it = l.begin(); + for (size_t i = 0; i < k; ++i) + ++it; + return it; + } + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/misc/range.hpp b/modules/dnns_easily_fooled/sferes/sferes/misc/range.hpp new file mode 100644 index 000000000..2fe1a3bce --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/misc/range.hpp @@ -0,0 +1,76 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
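// --- Illustrative sketch (standalone, not part of the files added by this diff) ---
// misc::rand(max) above draws a uniform value in the half-open range [0; max) (the
// do/while rejects the rare v == max case), misc::rand(min, max) shifts that into
// [min; max), and misc::flip_coin() returns true with probability 0.5. The same
// contract restated as a small self-test with plain std::rand():
#include <cassert>
#include <cstdlib>

static double rand01() { return std::rand() / (RAND_MAX + 1.0); }  // uniform in [0; 1)

int main() {
  for (int i = 0; i < 1000; ++i) {
    double v = rand01() * 3.0;                 // ~ misc::rand(3.0)      -> [0; 3)
    assert(v >= 0.0 && v < 3.0);
    double w = 2.0 + rand01() * (5.0 - 2.0);   // ~ misc::rand(2.0, 5.0) -> [2; 5)
    assert(w >= 2.0 && w < 5.0);
    bool coin = rand01() < 0.5;                // ~ misc::flip_coin()
    (void)coin;
  }
  return 0;
}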
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef RANGE_HPP_ +#define RANGE_HPP_ +#include + +namespace sferes { + namespace misc { + template + T1 put_in_range(T1 x, T2 min_, T3 max_) { + assert(max_ >= min_); + if (x < min_) + return min_; + else if (x > max_) + return max_; + else + return x; + } + + // scale a [0;1] value into to [min, max] + template + T1 scale(T1 x, T2 min_, T3 max_) { + assert(x >= 0); + assert(x <= 1); + assert(max_ > min_); + return x * (max_ - min_) + min_; + } + // scale a [min, max] value to [0, 1] + template + T1 unscale(T1 x, T2 min_, T3 max_) { + x = std::max((T1)x, (T1)min_); + x = std::min((T1)x, (T1)max_); + return (x - min_) / (max_ - min_); + } + + + + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/misc/sys.hpp b/modules/dnns_easily_fooled/sferes/sferes/misc/sys.hpp new file mode 100644 index 000000000..c7eeedd5a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/misc/sys.hpp @@ -0,0 +1,69 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
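// --- Illustrative sketch (standalone, not part of the files added by this diff) ---
// range.hpp above provides three small helpers: put_in_range clamps a value into
// [min; max], scale maps a [0;1] value onto [min; max], and unscale is the inverse
// (with clamping). Their expected behaviour, restated without the framework:
#include <algorithm>
#include <cassert>

template<typename T>
T put_in_range(T x, T min_, T max_) { return std::min(std::max(x, min_), max_); }

template<typename T>
T scale(T x, T min_, T max_) { return x * (max_ - min_) + min_; }   // [0;1] -> [min;max]

template<typename T>
T unscale(T x, T min_, T max_) {                                    // [min;max] -> [0;1]
  return (put_in_range(x, min_, max_) - min_) / (max_ - min_);
}

int main() {
  assert(put_in_range(1.5f, 0.0f, 1.0f) == 1.0f);
  assert(scale(0.25f, -2.0f, 2.0f) == -1.0f);
  assert(unscale(-1.0f, -2.0f, 2.0f) == 0.25f);   // round-trips with scale
  return 0;
}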
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef SYS_HPP_ +#define SYS_HPP_ + +#include +#include +#include + +namespace sferes { + namespace misc { + inline std::string date() { + char date[30]; + time_t date_time; + time(&date_time); + strftime(date, 30, "%Y-%m-%d_%H_%M_%S", localtime(&date_time)); + return date; + } + + inline std::string hostname() { + char hostname[30]; + int res = gethostname(hostname, 30); + assert(res == 0); + res = 0; // avoid a warning in opt mode + return std::string(hostname); + } + + inline std::string getpid() { + return boost::lexical_cast(::getpid()); + } + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/modif/diversity.hpp b/modules/dnns_easily_fooled/sferes/sferes/modif/diversity.hpp new file mode 100644 index 000000000..900ab2d0f --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/modif/diversity.hpp @@ -0,0 +1,88 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef MODIFIER_DIV_HPP +#define MODIFIER_DIV_HPP + +#include + +namespace sferes { + namespace modif { + namespace modifier_div { + template + struct _parallel_div { + typedef std::vector > pop_t; + pop_t _pop; + + ~_parallel_div() { } + _parallel_div(pop_t& pop) : _pop(pop) {} + _parallel_div(const _parallel_div& ev) : _pop(ev._pop) {} + void operator() (const parallel::range_t& r) const { + for (size_t i = r.begin(); i != r.end(); ++i) { + float d = 0.0f; + for (size_t j = 0; j < _pop.size(); ++j) + d += _pop[i]->dist(*_pop[j]); + d /= _pop.size(); + int l = _pop[i]->fit().objs().size() - 1; + assert(l > 0); + d += _pop[i]->fit().obj(l); + _pop[i]->fit().set_obj(l, d); + } + } + }; + } + + // ADD the mean distance to the population to the last objective (it + // DOESN'T add the objective automatically) + // you HAVE to initialize this value to a "good" one (depending on + // your constraints scheme) + // you phenotype/individual class must have a float dist(const + // Phen& o) method (the dist method must be thread-safe) + SFERES_CLASS(Diversity) { + public: + template + void apply(Ea& ea) { + // parallel compute + parallel::init(); + parallel::p_for(parallel::range_t(0, ea.pop().size()), + modifier_div::_parallel_div(ea.pop())); + } + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/modif/dummy.hpp b/modules/dnns_easily_fooled/sferes/sferes/modif/dummy.hpp new file mode 100644 index 000000000..c51f50ee7 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/modif/dummy.hpp @@ -0,0 +1,54 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
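// --- Illustrative sketch (standalone, not part of the files added by this diff) ---
// The Diversity modifier above adds, for each individual i, the mean distance
// (1/N) * sum_j dist(i, j) to its last objective; the phenotype must therefore
// expose a thread-safe dist() (phen::Parameters, later in this diff, uses the
// squared Euclidean distance). The same bookkeeping on plain vectors:
#include <cstddef>
#include <vector>

static float sq_dist(const std::vector<float>& a, const std::vector<float>& b) {
  float d = 0.0f;
  for (size_t k = 0; k < a.size(); ++k) { float x = a[k] - b[k]; d += x * x; }
  return d;
}

// updated last objective of individual i: previous value + mean distance to the population
static float with_diversity(const std::vector<std::vector<float> >& pop,
                            size_t i, float last_obj) {
  float d = 0.0f;
  for (size_t j = 0; j < pop.size(); ++j)
    d += sq_dist(pop[i], pop[j]);
  return last_obj + d / pop.size();
}

int main() {
  std::vector<std::vector<float> > pop(3, std::vector<float>(2, 0.0f));
  pop[1][0] = 1.0f;   // the individual that differs from the rest gets the larger bonus
  return with_diversity(pop, 1, 0.0f) > with_diversity(pop, 0, 0.0f) ? 0 : 1;
}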
+ + + + +#ifndef MODIFIER_DUMMY_HPP +#define MODIFIER_DUMMY_HPP + +#include + +namespace sferes { + namespace modif { + SFERES_CLASS(Dummy) { + public: + template + void apply(Ea& ea) { + } + }; + } +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/parallel.hpp b/modules/dnns_easily_fooled/sferes/sferes/parallel.hpp new file mode 100644 index 000000000..04d55fc6a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/parallel.hpp @@ -0,0 +1,119 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef PARALLEL_HPP_ +#define PARALLEL_HPP_ + +#ifndef NO_PARALLEL +#include +#include +#include +#include +#include +#endif + +// parallel for can be deactivated by defining NO_PARALLEL +// maximum of threads can be specified by defining NB_THREADS +namespace sferes { + namespace parallel { + +#ifndef NO_PARALLEL + typedef tbb::blocked_range range_t; + +#ifdef NB_THREADS + static void init() { + static tbb::task_scheduler_init init(NB_THREADS); + } +#else + static void init() { + static tbb::task_scheduler_init init; + } +#endif + + template + inline void p_for(const Range& range, const Body& body) { + tbb::parallel_for(range, body); + } + + template + inline void p_for(const Range& range, Body& body) { + tbb::parallel_for(range, body); + } + + + template + void sort(T1 i1, T2 i2, T3 comp) { + tbb::parallel_sort(i1, i2, comp); + } +#else + class PRange { + public: + PRange(size_t b, size_t e) : _begin(b), _end(e) {} + PRange(const PRange& o) : _begin(o._begin), _end(o._end) {} + size_t begin() const { + return _begin; + } + size_t end() const { + return _end; + } + protected: + size_t _begin, _end; + }; + typedef PRange range_t; + + static void init() {} + + template + inline void p_for(const Range& range, const Body& body) { + body(range); + } + // non const version + template + inline void p_for(const Range& range, Body& body) { + body(range); + } + + template + void sort(T1 i1, T2 i2, T3 comp) { + std::sort(i1, i2, comp); + } +#endif + }; + +} + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/phen/indiv.hpp b/modules/dnns_easily_fooled/sferes/sferes/phen/indiv.hpp new file mode 100644 index 000000000..7c4d6f92c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/phen/indiv.hpp @@ -0,0 +1,118 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
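// --- Illustrative sketch (not part of the files added by this diff) ---
// parallel.hpp above exposes p_for(range, body): with TBB the body is copied and
// applied to sub-ranges on several threads, and with -DNO_PARALLEL the same body is
// simply called once on the whole range. A body therefore has to be copyable and to
// do all of its work in operator()(const range_t&) const, exactly like
// _parallel_evaluate earlier in this diff. A minimal body squaring every element:
#include <vector>
#include <sferes/parallel.hpp>

struct square_body {
  std::vector<float>* _v;                       // shared storage, indices are disjoint
  square_body(std::vector<float>& v) : _v(&v) {}
  void operator()(const sferes::parallel::range_t& r) const {
    for (size_t i = r.begin(); i != r.end(); ++i)
      (*_v)[i] = (*_v)[i] * (*_v)[i];
  }
};

int main() {
  std::vector<float> v(1000, 2.0f);
  sferes::parallel::init();
  sferes::parallel::p_for(sferes::parallel::range_t(0, v.size()), square_body(v));
  return v[0] == 4.0f ? 0 : 1;
}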
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef INDIV_HPP_ +#define INDIV_HPP_ +#include +#include +#include +#include +#include + +#define SFERES_INDIV(Class, Parent) \ + template \ + class Class : public Parent, Exact>::ret> + + +namespace sferes { + namespace phen { + template + class Indiv { + public: + typedef Fit fit_t; + typedef Gen gen_t; + + Fit& fit() { + return _fit; + } + const Fit& fit() const { + return _fit; + } + + Gen& gen() { + return _gen; + } + const Gen& gen() const { + return _gen; + } + void mutate() { + dbg::trace trace("phen", DBG_HERE); + this->_gen.mutate(); + } + void cross(const boost::shared_ptr i2, + boost::shared_ptr& o1, + boost::shared_ptr& o2) { + dbg::trace trace("phen", DBG_HERE); + if (!o1) + o1 = boost::shared_ptr(new Exact()); + if (!o2) + o2 = boost::shared_ptr(new Exact()); + _gen.cross(i2->gen(), o1->gen(), o2->gen()); + } + void random() { + dbg::trace trace("phen", DBG_HERE); + this->_gen.random(); + } + void develop() { + dbg::trace trace("phen", DBG_HERE); + stc::exact(this)->develop(); + } + + template + void serialize(Archive & ar, const unsigned int version) { + dbg::trace trace("phen", DBG_HERE); + ar & BOOST_SERIALIZATION_NVP(_gen); + ar & BOOST_SERIALIZATION_NVP(_fit); + } + void show(std::ostream& os) { + os<<"nothing to show in a basic individual"< +#include +#include + +namespace sferes { + namespace phen { + SFERES_INDIV(Parameters, Indiv) { + + template + friend std::ostream& operator<<(std::ostream& output, const Parameters< G, F, P, E >& e); + public: +#ifdef EIGEN_CORE_H + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +#endif + Parameters() : _params((*this)._gen.size()) { } + typedef float type_t; + SFERES_CONST float max_p = Params::parameters::max; + SFERES_CONST float min_p = Params::parameters::min; + void develop() { + for (unsigned i = 0; i < _params.size(); ++i) + _params[i] = this->_gen.data(i) * (max_p - min_p) + min_p; + } + float data(size_t i) const { + assert(i < size()); + return _params[i]; + } + size_t size() const { + return _params.size(); + } + const std::vector& data() const { + return _params; + } + // squared Euclidean distance + float dist(const Parameters& params) const { + assert(params.size() == size()); + float d = 0.0f; + for (size_t i = 0; i < _params.size(); ++i) { + float x = _params[i] - params._params[i]; + d += x * x; + } + return d; + } + void show(std::ostream& os) const { + BOOST_FOREACH(float p, _params) + os< _params; + }; + template + std::ostream& operator<<(std::ostream& output, const Parameters< G, F, P, E >& e) { + for (size_t i = 0; i < e.size(); ++i) + output <<" "< +#include + +#include +#include +#include + +#include +#include + +namespace sferes { + + template + static void run_ea(int argc, + char **argv, + Ea& ea, + const boost::program_options::options_description& add_opts = + boost::program_options::options_description(), + bool init_rand = true) { + namespace po = boost::program_options; + std::cout<<"sferes2 version: "<(), "statistic number") + ("out,o", po::value(), "output file") + ("number,n", po::value(), "number in stat") + ("load,l", po::value(), "load a result file") + ("verbose,v", po::value >()->multitoken(), + "verbose output, available default streams : all, ea, fit, phen, trace") + ; + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) { + std::cout << desc << std::endl; + return; + } + 
if (vm.count("verbose")) { + dbg::init(); + std::vector streams = + vm["verbose"].as >(); + attach_ostream(dbg::warning, std::cout); + attach_ostream(dbg::error, std::cerr); + attach_ostream(dbg::info, std::cout); + bool all = std::find(streams.begin(), streams.end(), "all") != streams.end(); + bool trace = std::find(streams.begin(), streams.end(), "trace") != streams.end(); + if (all) { + streams.push_back("ea"); + streams.push_back("fit"); + streams.push_back("phen"); + streams.push_back("eval"); + } + BOOST_FOREACH(const std::string& s, streams) { + dbg::enable(dbg::all, s.c_str(), true); + dbg::attach_ostream(dbg::info, s.c_str(), std::cout); + if (trace) + dbg::attach_ostream(dbg::tracing, s.c_str(), std::cout); + } + if (trace) + attach_ostream(dbg::tracing, std::cout); + } + + parallel::init(); + if (vm.count("load")) { + ea.load(vm["load"].as()); + + if (!vm.count("out")) { + std::cerr<<"You must specifiy an out file"<(); + if (vm.count("number")) + n = vm["number"].as(); + std::ofstream ofs(vm["out"].as().c_str()); + ea.show_stat(stat, ofs, n); + } + } else + ea.run(); + } +} + + +#endif + + diff --git a/modules/dnns_easily_fooled/sferes/sferes/simu/simu.hpp b/modules/dnns_easily_fooled/sferes/sferes/simu/simu.hpp new file mode 100644 index 000000000..efaa61a58 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/simu/simu.hpp @@ -0,0 +1,74 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef SIMU_HPP_ +#define SIMU_HPP_ + +#include + +namespace sferes { + namespace simu { + SFERES_CLASS(Simu) { + public: + SFERES_CONST float dt = Params::simu::dt; + + Simu() {} + + // required + void init() { + stc::exact(this)->init(); + } + void refresh() { + stc::exact(this)->refresh(); + } + + // optional + void init_view() {} + void refresh_view() {} + protected: + }; + + SFERES_CLASS_D(SimuDummy, Simu) { + public: + void init() {} + void refresh() {} + }; + } +} + +#define SFERES_SIMU SFERES_CLASS_D +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/stat/best_fit.hpp b/modules/dnns_easily_fooled/sferes/sferes/stat/best_fit.hpp new file mode 100644 index 000000000..52ac21760 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/stat/best_fit.hpp @@ -0,0 +1,79 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef BEST_FIT_ +#define BEST_FIT_ + +#include +#include +#include +#include + +namespace sferes { + namespace stat { + // assume that the population is sorted ! 
+ SFERES_STAT(BestFit, Stat) { + public: + template + void refresh(const E& ea) { + assert(!ea.pop().empty()); + _best = *ea.pop().begin(); + this->_create_log_file(ea, "bestfit.dat"); + if (ea.dump_enabled()) + (*this->_log_file) << ea.gen() << " " << _best->fit().value() << std::endl; + } + void show(std::ostream& os, size_t k) { + _best->develop(); + _best->show(os); + _best->fit().set_mode(fit::mode::view); + _best->fit().eval(*_best); + + + } + const boost::shared_ptr best() const { + return _best; + } + template + void serialize(Archive & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(_best); + } + protected: + boost::shared_ptr _best; + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/stat/mean_fit.hpp b/modules/dnns_easily_fooled/sferes/sferes/stat/mean_fit.hpp new file mode 100644 index 000000000..2ca50bc83 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/stat/mean_fit.hpp @@ -0,0 +1,71 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
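After a run, the best individual recorded by the BestFit statistic above is read back through the EA's stat list; a minimal sketch, assuming the boost::fusion-based stat<i>() accessor that the test programs later in this patch rely on:

    // stat_t = boost::fusion::vector<stat::BestFit<phen_t, Params> >
    ea.run();
    float best = ea.stat<0>().best()->fit().value();
    std::cout << "best fitness: " << best << std::endl;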
+ + + + +#ifndef MEAN_FIT_ +#define MEAN_FIT_ + +#include +#include +#include + +namespace sferes { + namespace stat { + SFERES_CLASS(MeanFit) { + public: + template + void refresh(const E& ea) { + float s = 0; + BOOST_FOREACH(boost::shared_ptr i, ea.pop()) + s += i->fit().value(); + _mean = s / ea.pop().size(); + } + void show(std::ostream& os, size_t k) const { + os<<"mean fit :"<<_mean< + void serialize(Archive & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(_mean); + } + protected: + float _mean; + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/stat/pareto_front.hpp b/modules/dnns_easily_fooled/sferes/sferes/stat/pareto_front.hpp new file mode 100644 index 000000000..1e14211c6 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/stat/pareto_front.hpp @@ -0,0 +1,100 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef PARETO_FRONT_HPP_ +#define PARETO_FRONT_HPP_ + +#include +#include +#include +#include +#include +#include + +namespace sferes { + namespace stat { + SFERES_STAT(ParetoFront, Stat) { + public: + typedef std::vector > pareto_t; + // asume a ea.pareto_front() method + template + void refresh(const E& ea) { + _pareto_front = ea.pareto_front(); + parallel::sort(_pareto_front.begin(), _pareto_front.end(), + fit::compare_objs_lex()); + this->_create_log_file(ea, "pareto.dat"); + if (ea.dump_enabled()) + show_all(*(this->_log_file), ea.gen()); + //this->_log_file->close(); + } + void show(std::ostream& os, size_t k) const { + os<<"log format : gen id obj_1 ... 
obj_n"<develop(); + _pareto_front[k]->show(os); + _pareto_front[k]->fit().set_mode(fit::mode::view); + _pareto_front[k]->fit().eval(*_pareto_front[k]); + os << "=> displaying individual " << k << std::endl; + os << "fit:"; + for (size_t i =0; i < _pareto_front[k]->fit().objs().size(); ++i) + os << _pareto_front[k]->fit().obj(i) << " "; + os << std::endl; + assert(k < _pareto_front.size()); + + } + const pareto_t& pareto_front() const { + return _pareto_front; + } + template + void serialize(Archive & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(_pareto_front); + } + void show_all(std::ostream& os, size_t gen = 0) const { + for (unsigned i = 0; i < _pareto_front.size(); ++i) { + os << gen << " " << i << " "; + for (unsigned j = 0; j < _pareto_front[i]->fit().objs().size(); ++j) + os << _pareto_front[i]->fit().obj(j) << " "; + os << std::endl;; + } + } + + protected: + pareto_t _pareto_front; + }; + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/stat/stat.hpp b/modules/dnns_easily_fooled/sferes/sferes/stat/stat.hpp new file mode 100644 index 000000000..afc03f81c --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/stat/stat.hpp @@ -0,0 +1,83 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
+ + + + +#ifndef _STAT_HPP_ +#define _STAT_HPP_ + +#include +#include +#include +#include + +namespace sferes { + namespace stat { + template + class Stat { + public: + template + void refresh(const E& ea) { + assert(!ea.pop().empty()); + stc::exact(this)->refresh(ea); + } + void show(std::ostream& os, size_t k) { + } + template + void serialize(Archive & ar, const unsigned int version) { + } + protected: + boost::shared_ptr _log_file; + template + void _create_log_file(const E& ea, const std::string& name) { + if (!_log_file && ea.dump_enabled()) { + std::string log = ea.res_dir() + "/" + name; + _log_file = boost::shared_ptr(new std::ofstream(log.c_str())); + } + } + }; + + } +} + +#define SFERES_STAT(Class, Parent) \ + template \ + class Class : public Parent, Exact>::ret> + +#define SFERES_STAT_PARENT(Class, Parent) \ + Parent, Exact>::ret> + + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/stc.hpp b/modules/dnns_easily_fooled/sferes/sferes/stc.hpp new file mode 100644 index 000000000..de38a521a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/stc.hpp @@ -0,0 +1,137 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
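The SFERES_STAT macro defined above is how user statistics are declared; a minimal sketch of a custom statistic (not part of this patch; the name WorstFit and the file name worstfit.dat are made up, and it assumes, as BestFit does, that the population is kept sorted with the best individual first):

    SFERES_STAT(WorstFit, Stat) {
     public:
      template<typename E>
      void refresh(const E& ea) {
        assert(!ea.pop().empty());
        // population assumed sorted best-first, and ea.pop() assumed to be a
        // random-access container of shared_ptr, as used elsewhere in this patch
        _worst = ea.pop().back()->fit().value();
        this->_create_log_file(ea, "worstfit.dat");
        if (ea.dump_enabled())
          (*this->_log_file) << ea.gen() << " " << _worst << std::endl;
      }
      void show(std::ostream& os, size_t k) {
        os << "worst fit: " << _worst << std::endl;
      }
      template<typename Archive>
      void serialize(Archive& ar, const unsigned int version) {
        ar & BOOST_SERIALIZATION_NVP(_worst);
      }
     protected:
      float _worst;
    };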
+ + + + +#ifndef STC_HPP +#define STC_HPP +namespace stc { + + template + class Any { + }; + + template + Exact& exact(Any& ref) { + return *(Exact*)(void*)(&ref); + } + + template + const Exact& exact(const Any& cref) { + return *(const Exact*)(const void*)(&cref); + } + + template + Exact* exact(Any* ptr) { + return (Exact*)(void*)(ptr); + } + + template + const Exact* exact(const Any* cptr) { + return (const Exact*)(const void*)(cptr); + } + + struct Itself {}; + + // default version + template + struct FindExact { + typedef Exact ret; + }; + // version specialized for Exact=Itself + template + struct FindExact { + typedef T ret; + }; + struct _Params {}; + +} + +#define STC_FIND_EXACT(Type) typename stc::FindExact, Exact>::ret + + +// eq. class Class +#define STC_CLASS(Class) \ + template \ + class Class : public stc::Any + + + +// eq. class Class1 : public Parent +#define STC_CLASS_D(Class, Parent) \ + template \ + class Class : public Parent + + + +// return the parent class (eq. Class2) +#define STC_PARENT(Class, Parent) Parent + + +// eq. class Class +#define SFERES_CLASS(Class) \ + template \ + class Class : public stc::Any + +// eq. class Class1 : public Parent +#define SFERES_CLASS_D(Class, Parent) \ + template \ + class Class : public Parent, Exact>::ret> + +// to call the parent constructor +#define SFERES_PARENT(Class, Parent) Parent, Exact>::ret> + + + +// to simulate a static array (to be used in Param) +// from : SFERES_ARRAY(my_type, my_name, 0.2, 0.4) +// this generates 2 functions : +// - my_type my_name(size_t i) +// - size_t my_name_size() +// and a typedef my_type my_name_t +#define SFERES_ARRAY(T, A, ...) \ + static const T A(size_t i) \ + { assert(i < A##_size()); SFERES_CONST T _##A[] = { __VA_ARGS__ }; return _##A[i]; } \ + static const size_t A##_size() \ + { SFERES_CONST T _##A[] = { __VA_ARGS__ }; return sizeof(_##A) / sizeof(T); } \ + typedef T A##_t; + +// to simulate a string (to be used in Param) +#define SFERES_STRING(N, V) static const char* N() { return V; } + + +#define SFERES_CONST BOOST_STATIC_CONSTEXPR + +#endif diff --git a/modules/dnns_easily_fooled/sferes/sferes/wscript b/modules/dnns_easily_fooled/sferes/sferes/wscript new file mode 100644 index 000000000..acc26fa46 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/sferes/wscript @@ -0,0 +1,46 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. 
+#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + +def build(bld): + # sferes + sferes = bld.new_task_gen('cxx', 'staticlib') + sferes.source = 'dbg/dbg.cpp ea/cmaes.cpp' + sferes.includes = '. dbg' + sferes.target = 'sferes2' + sferes.want_libtool = 1 + sferes.uselib = 'BOOST BOOST_FILESYSTEM BOOST_SYSTEM BOOST_SERIALIZATION BOOST_PROGRAM_OPTIONS TBB' + mpi = bld.all_envs['default']['MPI_ENABLED'] + if mpi: + sferes.uselib += ' MPI BOOST_MPI' diff --git a/modules/dnns_easily_fooled/sferes/submit_jobs.py b/modules/dnns_easily_fooled/sferes/submit_jobs.py new file mode 100755 index 000000000..9bd3165ed --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/submit_jobs.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +import os, commands, sys + +projectGroup = "GroupName" +numRuns = 1 +runName = "Run" +yourMail = "anguyen8@uwyo.edu" +runName = runName+'_' +numCores = 128 # A multiple of 16 +numNodes = str(numCores/16) + +pathCurrentDir = os.path.dirname(os.path.abspath(__file__)) # Path to current directory without trailing slash '/' +executable = pathCurrentDir + "/build/default/exp/images/images " + +YourInputParameter1 = str(10) +#options = "" +#options = "--your_option your_value --your_second_option $your_variable --your_third_variable " + YourInputParameter1 +options = "$seed_num " +scriptFileName = "launchScript.sh" + + +def printScriptFile(): + scriptFile = open(scriptFileName,'w',) + + #This will print the header of the launch script + scriptFile.write( "#!/bin/bash\n") + scriptFile.write( "\n") + scriptFile.write( "#Do not edit. File automatically generated\n") + scriptFile.write( "\n") + + #Write any modules, environment variables or other commands your program needs before it is being executed here + #Always load compiler module first. 
See command "module spider" for available modules + #scriptFile.write( "module load intel/14.0.0\n") + scriptFile.write( "module load gnu/4.8.2\n") + #scriptFile.write( "module load tbb/4.2.2\n") + scriptFile.write( "module load cuda/5.5\n") + scriptFile.write( "module load openmpi/1.6.5\n") + #scriptFile.write( "module load allinea\n") + + #Here we change to the directory where the experiment will be executed + #Note that experiment dir is a variable that is not defined here + scriptFile.write( "echo \"Changing to directory: \" $experimentDir\n") + scriptFile.write( "cd $experimentDir\n") + scriptFile.write( "\n") + + + #scriptFile.write( "ln -s ../config/voxelize\n") + + #Echo what we will execute to a file called runToBe + scriptFile.write( "echo \" " + executable + options + " > thisistheoutput 2> err.log\" > runToBe\n") + + #Actually execute your program + #scriptFile.write( "time " + executable + options + " > thisistheoutput 2> err.log\n") + scriptFile.write( "time mpirun --mca mpi_leave_pinned 0 --mca mpi_warn_on_fork 0 -np " + str(numCores) + " " + executable + options + " > thisistheoutput 2> err.log\n") + + #This will print the footer of the launch script + scriptFile.write( "\n") + scriptFile.write( "echo \"Done with run\"\n") + scriptFile.close() + + #Here we make the launch script executable + os.system("chmod +x " + scriptFileName) + +################ +# main starts here # +################ + +print 'Starting a batch of runs called: ' + runName + +printScriptFile() + +for i in range(0,numRuns): + #i += 1 + #Create a new directory for our run + runNumStr = str(i) + runDirShort = "run_" + runNumStr.zfill(numRuns/10) + # If there is a path.. continue + if os.path.isdir(runDirShort): + print runDirShort + " already exists. Abort!" + #sys.exit(3) + else: # Create a new run_x folder + command = "mkdir " + runDirShort + os.system(command) + + #Create the string to our new directory + pwd = commands.getoutput("pwd") + experimentDir = pwd + "/" + runDirShort + + #Set your own variables + variableThatShouldBeDifferentForEachRun = str(i) + + #Create the command that will submit your experiment + command = ("qsub -v seed_num=" + variableThatShouldBeDifferentForEachRun + + ",experimentDir=" + experimentDir + + " -m abe" + + " -l walltime=05:00:00:00" + #+ " -l mem=64gb" + + " -l nodes="+ numNodes +":ppn=16" + #+ " -l nodes=1:ppn=16:gpus=2:gpu" + + " -M " + yourMail + + " -A " + projectGroup + + " -o " + experimentDir + "/myOut" + + " -e " + experimentDir + "/myErr" + + " -N " + runName + str(i) + " " + scriptFileName) + print command + + #Note: qsub variable list is a comma-separated list without spaces between variables. + #Ex: qsub -v var1=a,var2=b + + #Launch your experiment + os.system(command) diff --git a/modules/dnns_easily_fooled/sferes/tbb.py b/modules/dnns_easily_fooled/sferes/tbb.py new file mode 100644 index 000000000..10a8a0ee2 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tbb.py @@ -0,0 +1,55 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# JB Mouret - 2009 + +""" +Quick n dirty tbb detection +""" + +import os, glob, types +import Options, Configure + + +def detect_tbb(conf): + env = conf.env + opt = Options.options + + if Options.options.no_tbb: + print "TBB (multicore) is disabled" + return 0 + if Options.options.tbb: + conf.env['CPPPATH_TBB'] = [Options.options.tbb + '/include'] + conf.env['LIBPATH_TBB'] = [Options.options.tbb + '/lib'] + else: + conf.env['CPPPATH_TBB'] = ['/opt/intel/include'] + conf.env['LIBPATH_TBB'] = ['/opt/intel/lib'] + + res = Configure.find_file('tbb/parallel_for.h', conf.env['CPPPATH_TBB']+['/usr/include', '/usr/local/include']) + conf.check_message('header','tbb/parallel_for.h', (res != '') , res) + if (res == '') : + return 0 + if Options.options.apple: + res = Configure.find_file('libtbb.dylib', conf.env['LIBPATH_TBB'] + ['/usr/lib', '/usr/local/lib']) + else: + res = Configure.find_file('libtbb.so', conf.env['LIBPATH_TBB'] + ['/usr/lib', '/usr/local/lib']) + + conf.check_message('library','libtbb', (res != ''), res) + if (res == '') : + return 0 + conf.env['LIB_TBB'] = ['tbb'] + return 1 + +def detect(conf): + tbb_found = detect_tbb(conf) + if tbb_found != 0: + conf.env['TBB_ENABLED'] = True + else: + conf.env['TBB_ENABLED'] = False + conf.env['LIB_TBB'] = [] + + return detect_tbb(conf) + +def set_options(opt): + opt.add_option('--tbb', type='string', help='path to tbb', dest='tbb') + opt.add_option('--no-tbb', default=False, action='store_true', + help='disable tbb (multicore)', dest='no_tbb') diff --git a/modules/dnns_easily_fooled/sferes/tests/check_serialize.hpp b/modules/dnns_easily_fooled/sferes/tests/check_serialize.hpp new file mode 100644 index 000000000..7a04ea811 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/check_serialize.hpp @@ -0,0 +1,107 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#ifndef TEST_SERIALIZE_HPP +#define TEST_SERIALIZE_HPP + +#include +#include +#include +#include +#include +#include +#include +#include + +// a generic serialize test to be used in test suites +namespace sferes { + namespace tests { + + // pass this if you don't want to write the equality test + struct check_nothing { + template + void operator()(const T& x1, const T& x2) const { + } + }; + + + template + void check_serialize(const T& src, T& dest, const CheckEqual &check_equal) { + BOOST_CHECK(true); + std::string filename = boost::archive::tmpdir(); + filename += "/serialize_g.xml"; + + BOOST_CHECK(true); + { + std::ofstream ofs(filename.c_str()); + std::cout<> boost::serialization::make_nvp("gen", dest); + BOOST_CHECK(true); + } + check_equal(src, dest); + } + template + void check_serialize(const T& src, T& dest, const CheckEqual &check_equal) { + typedef boost::archive::xml_oarchive oa_xml_t; + typedef boost::archive::xml_iarchive ia_xml_t; + typedef boost::archive::text_oarchive oa_text_t; + typedef boost::archive::text_iarchive ia_text_t; + typedef boost::archive::binary_oarchive oa_bin_t; + typedef boost::archive::binary_iarchive ia_bin_t; + + std::cout<<"XML archive"<(src, dest, check_equal); + std::cout<<"test archive"<(src, dest, check_equal); + std::cout<<"binary archive" <(src, dest, check_equal); + + } + } +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/tests/ea/cmaes.cpp b/modules/dnns_easily_fooled/sferes/tests/ea/cmaes.cpp new file mode 100644 index 000000000..1dea37a61 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/ea/cmaes.cpp @@ -0,0 +1,120 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. 
+//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef NO_PARALLEL +#define NO_PARALLEL +#endif + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE cmaes + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; + + +struct Params { + struct pop { + SFERES_CONST size_t size = 1;//not used by CMAES + SFERES_CONST unsigned nb_gen = 650; + SFERES_CONST int dump_period = -1; + }; + struct cmaes { + SFERES_CONST float sigma = 0.5f; + SFERES_CONST float max_value = -1e-10; + }; + + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +float felli(const std::vector& xx) { + Eigen::VectorXf x = Eigen::VectorXf::Zero(xx.size()); + for (size_t i = 0; i < xx.size(); ++i) + x[i] = xx[i]; + Eigen::VectorXf v = Eigen::VectorXf::Zero(x.size()); + for (size_t i = 0; i < v.size(); ++i) + v[i] = powf(1e6, i / (x.size() - 1.0f)); + return v.dot((x.array() * x.array()).matrix()); +} + +SFERES_FITNESS(FitElli, sferes::fit::Fitness) { +public: + FitElli(const FitElli& f) { + assert(0); + BOOST_ERROR("copy constructors should be useless"); + } + FitElli& operator=(const FitElli& f) { + BOOST_ERROR("= operator should be useless"); + return *this; + } + FitElli() : _this(this) {} + template + void eval(Indiv& ind) { + this->_value = -felli(ind.data()); + } + FitElli* _this; +}; + + +BOOST_AUTO_TEST_CASE(test_cmaes) { + srand(time(0)); + typedef gen::Float<10, Params> gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::Cmaes ea_t; + ea_t ea; + + ea.run(); + float best = ea.stat<0>().best()->fit().value(); + std::cout<<"best fit (cmaes):"< -1e-3); + +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/ea/dom_sort.cpp b/modules/dnns_easily_fooled/sferes/tests/ea/dom_sort.cpp new file mode 100644 index 000000000..a66a498df --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/ea/dom_sort.cpp @@ -0,0 +1,126 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + +#ifndef NO_PARALLEL +#define NO_PARALLEL +#endif + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE dom_sort + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 1.0f / 30.0f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +SFERES_FITNESS(FitRand, sferes::fit::Fitness) { +public: + template + void eval(Indiv& ind) { + this->_objs.resize(3); + this->_objs[0] = sferes::misc::rand(10); + this->_objs[1] = sferes::misc::rand(10); + this->_objs[2] = sferes::misc::rand(10); + } +}; + + +BOOST_AUTO_TEST_CASE(test_domsort) { + srand(time(0)); + typedef gen::EvoFloat<30, Params> gen_t; + typedef phen::Parameters, Params> phen_t; + typedef boost::shared_ptr pphen_t; + typedef std::vector pop_t; + + pop_t pop; + for (size_t i = 0; i < 2000; ++i) { + boost::shared_ptr ind(new phen_t()); + ind->random(); + ind->fit().eval(*ind); + pop.push_back(ind); + } + // basic + std::vector fronts_basic; + std::vector ranks; + boost::timer tbasic; + ea::dom_sort_basic(pop, fronts_basic, ranks); + std::cout << "dom sort basic (2000 indivs):" + << tbasic.elapsed() << " s" << std::endl; + // standard + std::vector fronts; + boost::timer tstd; + ea::dom_sort(pop, fronts, ranks); + std::cout << "dom sort deb (2000 indivs):" + << tstd.elapsed() << " s" << std::endl; + BOOST_CHECK_EQUAL(fronts.size(), fronts_basic.size()); + for (size_t i = 0; i < fronts.size(); ++i) { + BOOST_CHECK_EQUAL(fronts[i].size(), fronts_basic[i].size()); + std::sort(fronts[i].begin(), fronts[i].end()); + std::sort(fronts_basic[i].begin(), fronts_basic[i].end()); + for (size_t j = 0; j < fronts[i].size(); ++j) { + BOOST_CHECK_EQUAL(fronts[i][j]->fit().obj(0), + fronts_basic[i][j]->fit().obj(0)); + BOOST_CHECK_EQUAL(fronts[i][j]->fit().obj(1), + fronts_basic[i][j]->fit().obj(1)); + } + } +} diff --git a/modules/dnns_easily_fooled/sferes/tests/ea/eps_moea.cpp b/modules/dnns_easily_fooled/sferes/tests/ea/eps_moea.cpp new file mode 100644 index 000000000..70ebbb3b9 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/ea/eps_moea.cpp @@ -0,0 +1,128 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
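Both routines compared in the dom_sort test above build the same non-dominated fronts; the definition they rely on (for maximized objectives, as used throughout sferes2) is: individual a dominates individual b when a is at least as good as b on every objective and strictly better on at least one. Front 0 contains the individuals dominated by nobody, front 1 the non-dominated individuals once front 0 is removed, and so on. For example, with two objectives, (3, 5) dominates (3, 4), while (3, 5) and (4, 2) are mutually non-dominated and land in the same front; since ordering inside a front is arbitrary, the test only checks that the two sorts produce fronts of equal sizes with matching objective values after sorting each front.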
+ + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE eps_moea + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 1.0f/30.0f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 20.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 100; + SFERES_CONST int dump_period = -1; + SFERES_ARRAY(float, eps, 0.0075f, 0.0075f); + SFERES_ARRAY(float, min_fit, 0.0f, 0.0f); + SFERES_CONST size_t grain = size / 4; + SFERES_CONST unsigned nb_gen = 60000 / grain; + }; + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +template +float _g(const Indiv &ind) { + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) { +public: + template + void eval(Indiv& ind) { + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - powf((f1 / g), 2.0f); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } +}; + + +BOOST_AUTO_TEST_CASE(test_epsmoea) { + srand(time(0)); + + typedef gen::EvoFloat<30, Params> gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::EpsMOEA ea_t; + ea_t ea; + + ea.run(); + + ea.stat<0>().show_all(std::cout, 0); + BOOST_CHECK(ea.stat<0>().pareto_front().size() >= 101); + std::cout<<"elite size :"<().pareto_front().size()< p, ea.stat<0>().pareto_front()) { + BOOST_CHECK(_g(*p) < 1.1); + } + +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/ea/nsga2.cpp b/modules/dnns_easily_fooled/sferes/tests/ea/nsga2.cpp new file mode 100644 index 000000000..40a76cf0a --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/ea/nsga2.cpp @@ -0,0 +1,140 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#define NO_PARALLEL +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE nsga2 + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 1.0f / 30.0f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 100; + SFERES_CONST unsigned nb_gen = 500; + SFERES_CONST float initial_aleat = 2.0f; + SFERES_CONST int dump_period = -1; + }; + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +template +float _g(const Indiv &ind) { + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) { +public: + FitZDT2(const FitZDT2& f) { + assert(0); + BOOST_ERROR("copy constructors should be useless"); + } + FitZDT2& operator=(const FitZDT2& f) { + BOOST_ERROR("= operator should be useless"); + return *this; + } + FitZDT2() : _this(this) {} + template + void eval(Indiv& ind) { + assert(this == _this); + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } + FitZDT2* _this; +}; + + +BOOST_AUTO_TEST_CASE(test_nsga2) { + srand(time(0)); + dbg::out(dbg::info)<<"running ex_ea ..."< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::Nsga2 ea_t; + ea_t ea; + + ea.run(); + + ea.stat<0>().show_all(std::cout, 0); + BOOST_CHECK(ea.stat<0>().pareto_front().size() > 50); + + BOOST_FOREACH(boost::shared_ptr p, ea.stat<0>().pareto_front()) { + std::cout<<_g(*p)< 0.0); + + } + +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/ea/rank_simple.cpp b/modules/dnns_easily_fooled/sferes/tests/ea/rank_simple.cpp new file mode 100644 index 000000000..68f4031a4 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/ea/rank_simple.cpp @@ -0,0 +1,122 @@ +//| This file is a part of the sferes2 framework. 
+//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
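The FitZDT2 fitness used by the eps_moea and nsga2 tests above (and by the rank_simple test below) is the standard two-objective ZDT2 benchmark on 30 genes in [0, 1]: f1(x) = x_1, g(x) = 1 + 9 * (x_2 + ... + x_30) / 29, and f2(x) = g(x) * (1 - (f1/g)^2), with the Pareto-optimal front at g = 1. Because sferes2 maximizes objectives, the eval() methods store -f1 and -f2 in _objs (rank_simple additionally sets the scalar _value to -f1 - f2).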
+ + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE rank_simple2 + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 0.5f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 100; + SFERES_CONST unsigned nb_gen = 300; + SFERES_CONST float initial_aleat = 2.0f; + SFERES_CONST int dump_period = -1; + SFERES_CONST float coeff = 1.1f; + SFERES_CONST float keep_rate = 0.6f; + }; + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +template +float _g(const Indiv &ind) { + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) { +public: + FitZDT2() : _this(this) {} + template + void eval(Indiv& ind) { + assert(this == _this); + this->_objs.resize(2); + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + this->_value = -f1 -f2; + } + FitZDT2* _this; +}; + + +BOOST_AUTO_TEST_CASE(test_rank_simple2) { + srand(time(0)); + typedef gen::EvoFloat<30, Params> gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::RankSimple ea_t; + ea_t ea; + + ea.run(); + +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/eval/mpi.cpp b/modules/dnns_easily_fooled/sferes/tests/eval/mpi.cpp new file mode 100644 index 000000000..ecd872bf1 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/eval/mpi.cpp @@ -0,0 +1,118 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. 
Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE parallel +#include +#include +#if 0 +//#ifdef MPI_ENABLED + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + + +struct Params { + struct evo_float { + + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 100; + SFERES_CONST unsigned nb_gen = 1000; + SFERES_CONST int dump_period = -1; + SFERES_CONST int initial_aleat = 1; + SFERES_CONST float coeff = 1.1f; + SFERES_CONST float keep_rate = 0.6f; + }; + struct parameters { + SFERES_CONST float min = -10.0f; + SFERES_CONST float max = 10.0f; + }; +}; + +SFERES_FITNESS(FitTest, sferes::fit::Fitness) { +public: + template + void eval(Indiv& ind) { + float v = 0; + for (unsigned i = 0; i < ind.size(); ++i) { + float p = ind.data(i); + v += p * p * p * p; + } + this->_value = -v; + } +}; + +BOOST_AUTO_TEST_CASE(test_mpi) { + dbg::out(dbg::info)<<"running test_mpi ..."< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Mpi eval_t; + typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::RankSimple ea_t; + ea_t ea; + ea.run(); + + std::cout<<"==> best fitness ="<().best()->fit().value()< mean fitness ="<().mean()< + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 100; + SFERES_CONST unsigned nb_gen = 1000; + SFERES_CONST int dump_period = -1; + SFERES_CONST int initial_aleat = 1; + SFERES_CONST float coeff = 1.1f; + SFERES_CONST float keep_rate = 0.6f; + }; + struct parameters { + SFERES_CONST float min = -10.0f; + SFERES_CONST float max = 10.0f; + }; +}; + +SFERES_FITNESS(FitTest, sferes::fit::Fitness) { +public: + template + void eval(Indiv& ind) { + float v = 0; + for (unsigned i = 0; i < ind.size(); ++i) { + float p = ind.data(i); + v += p * p * p * p; + } + this->_value = -v; + } +}; + + +BOOST_AUTO_TEST_CASE(test_parallel) { + dbg::out(dbg::info)<<"running ex_ea ..."< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector, stat::MeanFit > stat_t; + typedef modif::Dummy<> modifier_t; + typedef ea::RankSimple ea_t; + ea_t ea; + + ea.run(); + + std::cout<<"==> best fitness ="<().best()->fit().value()< mean fitness ="<().mean()< +#include +#include +using namespace sferes::gen; + + +template +void test() { + for (unsigned i = 0; i < 30; ++i) { + 
BitString gen1; + BitString gen2; + BitString gen3; + BitString gen4; + BOOST_CHECK(gen1.data(0) < 1.0f); + BOOST_CHECK(gen1.data(0) >= 0.0f); + BOOST_CHECK_CLOSE(gen1.data(0), gen1.int_data(0), 0.0001); + BOOST_CHECK_CLOSE(gen2.data(0), gen2.int_data(0), 0.0001); + gen1.random(); + gen2.random(); + gen1.mutate(); + gen1.cross(gen2, gen3, gen4); + + + } +} + +struct Params1 { + struct bit_string { + SFERES_CONST size_t nb_bits = 8; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float mutation_rate_bit = 0.1f; + }; +}; + + +struct Params2 { + struct bit_string { + SFERES_CONST size_t nb_bits = 50; + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float mutation_rate_bit = 0.1f; + }; +}; + + + +BOOST_AUTO_TEST_CASE(bitstring) { + test<10, Params1>(); + +} + +struct check_bitstring_eq { + template + void operator()(const T& gen1, const T& gen2) const { + BOOST_CHECK_EQUAL(gen1.size(), gen2.size()); + for (size_t i = 0; i < gen1.size(); ++i) + BOOST_CHECK(fabs(gen1.data(i) - gen2.data(i) < 0.001)); + } +}; + +BOOST_AUTO_TEST_CASE(bitstring_serialize) { + BitString<10, Params1> gen1, gen2; + gen1.random(); + sferes::tests::check_serialize(gen1, gen2, check_bitstring_eq()); +} + + + + +BOOST_AUTO_TEST_CASE(bitstring_serialize_long) { + BitString<10, Params2> gen1, gen2; + gen1.random(); + sferes::tests::check_serialize(gen1, gen2, check_bitstring_eq()); +} + + diff --git a/modules/dnns_easily_fooled/sferes/tests/gen/cmaes.cpp b/modules/dnns_easily_fooled/sferes/tests/gen/cmaes.cpp new file mode 100644 index 000000000..31845ed34 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/gen/cmaes.cpp @@ -0,0 +1,75 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
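Like the bitstring and evo_float tests, the CMA-ES test below validates genotype serialization by writing one individual to an archive, reading it back into a second individual, and comparing them element by element (check_es_eq). A minimal sketch of that round-trip pattern with Boost.Serialization follows; the archive type, the helper name roundtrip_check, and the reuse of the serialize_g.xml file name committed alongside these tests are assumptions, so the sferes2 check_serialize helper may differ in detail.

    // Illustrative serialize-then-compare round trip; Gen must be Boost.Serializable.
    #include <fstream>
    #include <boost/archive/xml_iarchive.hpp>
    #include <boost/archive/xml_oarchive.hpp>
    #include <boost/serialization/nvp.hpp>

    template <typename Gen, typename Check>
    void roundtrip_check(Gen& gen1, Gen& gen2, const Check& check) {
      {
        std::ofstream ofs("serialize_g.xml");
        boost::archive::xml_oarchive oa(ofs);
        oa << boost::serialization::make_nvp("gen", gen1);   // write gen1
      }                                                      // archive flushed here
      {
        std::ifstream ifs("serialize_g.xml");
        boost::archive::xml_iarchive ia(ifs);
        ia >> boost::serialization::make_nvp("gen", gen2);   // read back into gen2
      }
      check(gen1, gen2);                                     // element-wise BOOST_CHECKs
    }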
+ + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE CMAES_gen + +#ifdef EIGEN3_ENABLED + +#include +#include +#include + + +struct Params1 { + struct es { + }; +}; + + + +struct check_es_eq { + template + void operator()(const T& gen1, const T& gen2) const { + BOOST_CHECK_EQUAL(gen1.size(), gen2.size()); + for (size_t i = 0; i < gen1.size(); ++i) + BOOST_CHECK(fabs(gen1.data(i) - gen2.data(i) < 1e-10)); + } +}; + +BOOST_AUTO_TEST_CASE(es) { + + sferes::gen::Cmaes<10, Params1> gen1, gen2; + gen1.random(); + sferes::tests::check_serialize(gen1, gen2, check_es_eq()); +} +#else +#warning EIGEN3 is disabled -> no CMAES +int main() { + return 0; +} +#endif diff --git a/modules/dnns_easily_fooled/sferes/tests/gen/evo_float.cpp b/modules/dnns_easily_fooled/sferes/tests/gen/evo_float.cpp new file mode 100644 index 000000000..fe14945bc --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/gen/evo_float.cpp @@ -0,0 +1,123 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. 
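The evo_float tests that follow exercise polynomial, gaussian, and uniform mutation together with SBX and recombination crossover, with eta_m and eta_c controlling how tightly mutants and children stay around the parent values. As a reminder of what eta_m means, here is a textbook sketch of Deb's polynomial mutation for one gene clamped to [lo, hi]; it illustrates the operator family only and is not the sferes2 implementation, which may handle boundaries and random draws differently.

    // Textbook polynomial mutation of a single gene in [lo, hi];
    // larger eta_m keeps the mutant closer to the parent value (illustrative only).
    #include <algorithm>
    #include <cmath>
    #include <cstdlib>

    float polynomial_mutation(float x, float lo, float hi, float eta_m) {
      float u = static_cast<float>(std::rand()) / RAND_MAX;  // u ~ U(0, 1)
      float delta;
      if (u < 0.5f)
        delta = std::pow(2.0f * u, 1.0f / (eta_m + 1.0f)) - 1.0f;
      else
        delta = 1.0f - std::pow(2.0f * (1.0f - u), 1.0f / (eta_m + 1.0f));
      return std::min(hi, std::max(lo, x + delta * (hi - lo)));
    }

SBX crossover plays the analogous role for recombination: eta_c sets the spread of the two children around the two parents.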
+ + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE evo_float + +#include +#include +#include + +using namespace sferes::gen; +using namespace sferes::gen::evo_float; + + +template +void test() { + for (unsigned i = 0; i < 30; ++i) { + EvoFloat gen1; + EvoFloat gen2; + EvoFloat gen3; + EvoFloat gen4; + gen1.random(); + gen1.mutate(); + gen1.cross(gen2, gen3, gen4); + } +} + +struct Params1 { + struct evo_float { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 15.0f; + }; +}; + + +BOOST_AUTO_TEST_CASE(polynomial_sbx) { + test<10, Params1>(); +} + +struct check_evofloat_eq { + template + void operator()(const T& gen1, const T& gen2) const { + BOOST_CHECK_EQUAL(gen1.size(), gen2.size()); + for (size_t i = 0; i < gen1.size(); ++i) + BOOST_CHECK(fabs(gen1.data(i) - gen2.data(i) < 0.001)); + } +}; + +BOOST_AUTO_TEST_CASE(polynomial_sbx_serialize) { + EvoFloat<10, Params1> gen1, gen2; + gen1.random(); + sferes::tests::check_serialize(gen1, gen2, check_evofloat_eq()); +} + +struct Params2 { + struct evo_float { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = gaussian; + SFERES_CONST cross_over_t cross_over_type = recombination; + SFERES_CONST float sigma = 0.3f; + SFERES_CONST float eta_c = 15.0f; + }; +}; + + +BOOST_AUTO_TEST_CASE(gaussian_recomb) { + test<10, Params2>(); +} + +struct Params3 { + struct evo_float { + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.1f; + SFERES_CONST mutation_t mutation_type = uniform; + SFERES_CONST cross_over_t cross_over_type = sbx; + SFERES_CONST float max = 0.3f; + SFERES_CONST float eta_c = 15.0f; + }; +}; + +BOOST_AUTO_TEST_CASE(uniform_sbx) { + test<10, Params3>(); +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/gen/sampled.cpp b/modules/dnns_easily_fooled/sferes/tests/gen/sampled.cpp new file mode 100644 index 000000000..b20891253 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/gen/sampled.cpp @@ -0,0 +1,106 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". +//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. 
+//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE sampled + +#include +#include +#include + +struct Params1 { + struct sampled { + SFERES_ARRAY(float, values, 0, 1, 2, 3, 4); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; +}; + +struct Params2 { + struct sampled { + SFERES_ARRAY(int, values, 0, 1, 2, 3, 4); + SFERES_CONST float mutation_rate = 0.1f; + SFERES_CONST float cross_rate = 0.25f; + SFERES_CONST bool ordered = false; + }; +}; + +template +void test() { + typedef sferes::gen::Sampled<10, P> gen_t; + gen_t g[4]; + for (size_t k = 0; k < 4; ++k) { + g[k].random(); + g[k].mutate(); + g[k].mutate(); + } + g[0].cross(g[1], g[2], g[3]); + for (size_t k = 0; k < 4; ++k) + for (size_t i = 0; i < g[k].size(); ++i) + BOOST_CHECK(g[k].data(i) == 0 + || g[k].data(i) == 1 + || g[k].data(i) == 2 + || g[k].data(i) == 3 + || g[k].data(i) == 4); + +} + + +struct check_sampled_eq { + template + void operator()(const T& gen1, const T& gen2) const { + BOOST_CHECK_EQUAL(gen1.size(), gen2.size()); + for (size_t i = 0; i < gen1.size(); ++i) + BOOST_CHECK_EQUAL(gen1.data(i), gen2.data(i)); + } +}; + +BOOST_AUTO_TEST_CASE(polynomial_sbx_serialize) { + sferes::gen::Sampled<100, Params1> gen1, gen2; + gen1.random(); + sferes::tests::check_serialize(gen1, gen2, check_sampled_eq()); +} + +BOOST_AUTO_TEST_CASE(sampled_gen_ordered) { + test(); +} + +BOOST_AUTO_TEST_CASE(sampled_gen_unordered) { + test(); +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/modif/diversity.cpp b/modules/dnns_easily_fooled/sferes/tests/modif/diversity.cpp new file mode 100644 index 000000000..f6fe99219 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/modif/diversity.cpp @@ -0,0 +1,138 @@ +//| This file is a part of the sferes2 framework. +//| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +//| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +//| +//| This software is a computer program whose purpose is to facilitate +//| experiments in evolutionary computation and evolutionary robotics. +//| +//| This software is governed by the CeCILL license under French law +//| and abiding by the rules of distribution of free software. You +//| can use, modify and/ or redistribute the software under the terms +//| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +//| following URL "http://www.cecill.info". 
+//| +//| As a counterpart to the access to the source code and rights to +//| copy, modify and redistribute granted by the license, users are +//| provided only with a limited warranty and the software's author, +//| the holder of the economic rights, and the successive licensors +//| have only limited liability. +//| +//| In this respect, the user's attention is drawn to the risks +//| associated with loading, using, modifying and/or developing or +//| reproducing the software by the user in light of its specific +//| status of free software, that may mean that it is complicated to +//| manipulate, and that also therefore means that it is reserved for +//| developers and experienced professionals having in-depth computer +//| knowledge. Users are therefore encouraged to load and test the +//| software's suitability as regards their requirements in conditions +//| enabling the security of their systems and/or data to be ensured +//| and, more generally, to use and operate it in the same conditions +//| as regards security. +//| +//| The fact that you are presently reading this means that you have +//| had knowledge of the CeCILL license and that you accept its terms. + + + + +#define NO_PARALLEL +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE nsga2_diversity + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace sferes; +using namespace sferes::gen::evo_float; + +struct Params { + struct evo_float { + + SFERES_CONST float cross_rate = 0.5f; + SFERES_CONST float mutation_rate = 0.5f; + SFERES_CONST float eta_m = 15.0f; + SFERES_CONST float eta_c = 10.0f; + SFERES_CONST mutation_t mutation_type = polynomial; + SFERES_CONST cross_over_t cross_over_type = sbx; + }; + struct pop { + SFERES_CONST unsigned size = 200; + SFERES_CONST unsigned nb_gen = 600; + SFERES_CONST float initial_aleat = 2.0f; + SFERES_CONST int dump_period = -1; + }; + struct parameters { + SFERES_CONST float min = 0.0f; + SFERES_CONST float max = 1.0f; + }; +}; + +template +float _g(const Indiv &ind) { + float g = 0.0f; + assert(ind.size() == 30); + for (size_t i = 1; i < 30; ++i) + g += ind.data(i); + g = 9.0f * g / 29.0f; + g += 1.0f; + return g; +} + +SFERES_FITNESS(FitZDT2, sferes::fit::Fitness) { +public: + FitZDT2(const FitZDT2& f) { + assert(0); + BOOST_ERROR("copy constructors should be useless"); + } + FitZDT2& operator=(const FitZDT2& f) { + BOOST_ERROR("= operator should be useless"); + return *this; + } + FitZDT2() : _this(this) {} + template + void eval(Indiv& ind) { + assert(this == _this); + this->_objs.resize(3);// resize for div + float f1 = ind.data(0); + float g = _g(ind); + float h = 1.0f - pow((f1 / g), 2.0); + float f2 = g * h; + this->_objs[0] = -f1; + this->_objs[1] = -f2; + } + FitZDT2* _this; +}; + + +BOOST_AUTO_TEST_CASE(test_nsga2) { + srand(time(0)); + dbg::out(dbg::info)<<"running ex_ea ..."< gen_t; + typedef phen::Parameters, Params> phen_t; + typedef eval::Parallel eval_t; + typedef boost::fusion::vector > stat_t; + typedef modif::Diversity<> modifier_t; + typedef ea::Nsga2 ea_t; + ea_t ea; + + ea.run(); + + ea.stat<0>().show_all(std::cout, 0); + BOOST_CHECK(ea.stat<0>().pareto_front().size() > 50); + + BOOST_FOREACH(boost::shared_ptr p, ea.stat<0>().pareto_front()) { + BOOST_CHECK_EQUAL(p->fit().objs().size(), 3); + } + +} + diff --git a/modules/dnns_easily_fooled/sferes/tests/serialize_g.xml b/modules/dnns_easily_fooled/sferes/tests/serialize_g.xml new file mode 100644 index 000000000..2c97fc4cb Binary files 
/dev/null and b/modules/dnns_easily_fooled/sferes/tests/serialize_g.xml differ diff --git a/modules/dnns_easily_fooled/sferes/tests/wscript b/modules/dnns_easily_fooled/sferes/tests/wscript new file mode 100644 index 000000000..6cbb771fe --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/tests/wscript @@ -0,0 +1,54 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. +#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. +#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + +import os, glob, types + +def build(bld): + # set boost log level + os.environ['BOOST_TEST_LOG_LEVEL'] = 'all' + # compile & run every *.cpp automagically + files = glob.glob("tests/*/*.cpp") + for f in files: +# if not f.find('mpi'): + obj = bld.new_task_gen(features='cxx cprogram') + obj.source = '../' + f + obj.includes = '../' + fname = f.replace('/', '_') + fname = fname[0:-4] + obj.target = fname + obj.unit_test = 1 + obj.uselib_local = 'sferes2' + obj.uselib = 'TBB BOOST BOOST_UNIT_TEST_FRAMEWORK EIGEN3' + obj.cxxflags = ['-std=c++11'] diff --git a/modules/dnns_easily_fooled/sferes/waf b/modules/dnns_easily_fooled/sferes/waf new file mode 100755 index 000000000..b3aae1385 Binary files /dev/null and b/modules/dnns_easily_fooled/sferes/waf differ diff --git a/modules/dnns_easily_fooled/sferes/wscript b/modules/dnns_easily_fooled/sferes/wscript new file mode 100644 index 000000000..f6f230542 --- /dev/null +++ b/modules/dnns_easily_fooled/sferes/wscript @@ -0,0 +1,283 @@ +#! /usr/bin/env python +#| This file is a part of the sferes2 framework. +#| Copyright 2009, ISIR / Universite Pierre et Marie Curie (UPMC) +#| Main contributor(s): Jean-Baptiste Mouret, mouret@isir.fr +#| +#| This software is a computer program whose purpose is to facilitate +#| experiments in evolutionary computation and evolutionary robotics. 
+#| +#| This software is governed by the CeCILL license under French law +#| and abiding by the rules of distribution of free software. You +#| can use, modify and/ or redistribute the software under the terms +#| of the CeCILL license as circulated by CEA, CNRS and INRIA at the +#| following URL "http://www.cecill.info". +#| +#| As a counterpart to the access to the source code and rights to +#| copy, modify and redistribute granted by the license, users are +#| provided only with a limited warranty and the software's author, +#| the holder of the economic rights, and the successive licensors +#| have only limited liability. +#| +#| In this respect, the user's attention is drawn to the risks +#| associated with loading, using, modifying and/or developing or +#| reproducing the software by the user in light of its specific +#| status of free software, that may mean that it is complicated to +#| manipulate, and that also therefore means that it is reserved for +#| developers and experienced professionals having in-depth computer +#| knowledge. Users are therefore encouraged to load and test the +#| software's suitability as regards their requirements in conditions +#| enabling the security of their systems and/or data to be ensured +#| and, more generally, to use and operate it in the same conditions +#| as regards security. +#| +#| The fact that you are presently reading this means that you have +#| had knowledge of the CeCILL license and that you accept its terms. + + +import Options +import copy +import os, glob, types +import sferes +import tbb +import sys +import commands +import TaskGen, Task, Utils +from Constants import RUN_ME +import unittestw, Utils +import Configure + +#VERSION='0.'+commands.getoutput('git rev-parse HEAD') +VERSION='0.1' +APPNAME='sferes2' + +srcdir = '.' +blddir = 'build' + +modules = sferes.parse_modules() + +def init(): + pass + +def set_options(opt): + # tools + opt.tool_options('compiler_cxx') + opt.tool_options('boost') + opt.tool_options('tbb') + opt.tool_options('mpi') + opt.tool_options('eigen3') + opt.tool_options('unittest') + + # sferes specific + opt.add_option('--bullet', type='string', help='path to bullet', dest='bullet') + opt.add_option('--apple', type='string', help='enable apple support', dest='apple') + opt.add_option('--rpath', type='string', help='set rpath', dest='rpath') + opt.add_option('--includes', type='string', help='add an include path, e.g. /home/mandor/include', dest='includes') + opt.add_option('--libs', type='string', help='add a lib path, e.g. 
/home/mandor/lib', dest='libs') + opt.add_option('--cpp11', type='string', help='force c++-11 compilation [--cpp11=yes]', dest='cpp11') + + # exp commands + opt.add_option('--create', type='string', help='create a new exp', dest='create_exp') + opt.add_option('--exp', type='string', help='exp to build', dest='exp') + opt.add_option('--launch', type='string', help='config file to launch', dest='launch') + opt.add_option('--time_travel', type='string', help='config file to time-travel', dest='time_travel') + opt.add_option('--kill', type='string', help='config file to kill', dest='kill') + opt.add_option('--status', type='string', help='config file to status', dest='status') + opt.add_option('--qsub', type='string', help='config file (json) to submit to torque', dest='qsub') + opt.add_option('--ll', type='string', help='config file (json) to submit to loadleveler', dest='loadleveler') + + for i in modules: + print 'module : [' + i + ']' + opt.sub_options(i) + + +def configure(conf): + # log configure options + fname = blddir + '/configure.options' + args = open(fname, 'a') + for i in sys.argv: + args.write(i + ' ') + args.write("\n") + args.close() + + conf.check_tool('compiler_cxx') + + common_flags = "-D_REENTRANT -Wall -fPIC -ftemplate-depth-1024 -Wno-sign-compare -Wno-deprecated -Wno-unused " + if Options.options.cpp11 and Options.options.cpp11 == 'yes': + common_flags += '-std=c++11 ' + + # boost + conf.check_tool('boost') + conf.check_boost(lib='serialization filesystem system unit_test_framework program_options graph mpi python thread', + min_version='1.35') + # tbb + conf.check_tool('tbb') + + # mpi.h + mpi_found = conf.check_tool('mpi') + + # boost mpi + if (len(conf.env['LIB_BOOST_MPI']) != 0 and conf.env['MPI_FOUND']): + conf.env['MPI_ENABLED'] = True + else: + conf.env['MPI_ENABLED'] = False + + # sdl (optional) + sdl = conf.check_cfg(package='sdl', + args='--cflags --libs', + msg="Checking for SDL (optional)", + uselib_store='SDL', + mandatory=False) + if sdl: common_flags += '-DUSE_SDL ' + + conf.env['CCDEFINES_SDL_gfx']=['_GNU_SOURCE=1', '_REENTRANT'] + conf.env['CPPPATH_SDL_gfx']=['/usr/include/SDL'] + conf.env['LIBPATH_SDL_gfx']=['/usr/lib'] + conf.env['CXXDEFINES_SDL_gfx']=['_GNU_SOURCE=1', '_REENTRANT'] + conf.env['LIB_SDL_gfx']=['SDL_gfx'] + conf.env['HAVE_SDL_gfx']=1 + + # eigen 3 (optional) + eigen3_found = conf.check_tool('eigen3') + + # ode (optiona) + ode_found = conf.check_tool('ode') + + # gsl (optional) + conf.check_cfg(package='gsl', + args='--cflags --libs', + msg="Checking for GSL (optional)", + uselib_store='GSL', + mandatory=False) + + # bullet (optional) + conf.env['LIB_BULLET'] = ['bulletdynamics', 'bulletcollision', 'bulletmath'] + if Options.options.bullet : + conf.env['LIBPATH_BULLET'] = Options.options.bullet + '/lib' + conf.env['CPPPATH_BULLET'] = Options.options.bullet + '/src' + + # osg (optional) + conf.env['LIB_OSG'] = ['osg', 'osgDB', 'osgUtil', + 'osgViewer', 'OpenThreads', + 'osgFX', 'osgShadow'] + + + # Mac OS specific options + if Options.options.apple and Options.options.apple == 'yes': + common_flags += ' -Wno-gnu-static-float-init ' + + conf.env['LIB_TCMALLOC'] = 'tcmalloc' + conf.env['LIB_PTMALLOC'] = 'ptmalloc3' + + conf.env['LIB_EFENCE'] = 'efence' + conf.env['LIB_BZIP2'] = 'bz2' + conf.env['LIB_ZLIB'] = 'z' + + conf.env['LIBPATH_OPENGL'] = '/usr/X11R6/lib' + conf.env['LIB_OPENGL'] = ['GL', 'GLU', 'glut'] + + if Options.options.rpath: + conf.env.append_value("LINKFLAGS", "--rpath="+Options.options.rpath) + + # modules + for i in modules: 
+ conf.sub_config(i) + + # link flags + if Options.options.libs: + conf.env.append_value("LINKFLAGS", "-L" + Options.options.libs) + + if Options.options.includes : + common_flags += " -I" + Options.options.includes + ' ' + if conf.env['MPI_ENABLED']: + common_flags += '-DMPI_ENABLED ' + if not conf.env['TBB_ENABLED']: + common_flags += '-DNO_PARALLEL ' + if conf.env['EIGEN3_FOUND']: + common_flags += '-DEIGEN3_ENABLED ' + + common_flags += "-DSFERES_ROOT=\"" + os.getcwd() + "\" " + + cxxflags = conf.env['CXXFLAGS'] + # release + conf.setenv('default') + opt_flags = common_flags + ' -DNDEBUG -O3 -ffast-math' + + conf.env['CXXFLAGS'] = cxxflags + opt_flags.split(' ') + conf.env['SFERES_ROOT'] = os.getcwd() + + # debug + env = conf.env.copy() + env.set_variant('debug') + conf.set_env_name('debug', env) + conf.setenv('debug') + debug_flags = common_flags + '-O1 -ggdb3 -DDBG_ENABLED' + conf.env['CXXFLAGS'] = cxxflags + debug_flags.split(' ') + + # display flags + def flat(list) : + str = "" + for i in list : + str += i + ' ' + return str + print '\n--- configuration ---' + print 'compiler:' + print' * CXX: ' + str(conf.env['CXX_NAME']) + print 'boost version: ' + str(conf.env['BOOST_VERSION']) + print 'mpi: ' + str(conf.env['MPI_ENABLED']) + print "Compilation flags :" + conf.setenv('default') + print " * default:" + print " CXXFLAGS : " + flat(conf.env['CXXFLAGS']) + print " LINKFLAGS: " + flat(conf.env['LINKFLAGS']) + conf.setenv('debug') + print " * debug:" + print " CXXFLAGS : " + flat(conf.env['CXXFLAGS']) + print " LINKFLAGS: " + flat(conf.env['LINKFLAGS']) + print " " + print "--- license ---" + print "Sferes2 is distributed under the CECILL license (GPL-compatible)" + print "Please check the accompagnying COPYING file or http://www.cecill.info/" + +def build(bld): + #v = commands.getoutput('git rev-parse HEAD') + v = VERSION + bld.env_of_name('default')['CXXFLAGS'].append("-DVERSION=\"(const char*)\\\""+v+"\\\"\"") + bld.env_of_name('debug')['CXXFLAGS'].append("-DVERSION=\"(const char*)\\\""+v+"\\\"\"") + + print ("Entering directory `" + os.getcwd() + "'") + bld.add_subdirs('sferes examples tests') + if Options.options.exp: + print 'Building exp: ' + Options.options.exp + bld.add_subdirs('exp/' + Options.options.exp) + for i in modules: + bld.add_subdirs(i) + for obj in copy.copy(bld.all_task_gen): + new_obj = obj.clone('debug') + bld.add_post_fun(unittestw.summary) + +def shutdown (): + if Options.options.create_exp: + sferes.create_exp(Options.options.create_exp) + if Options.options.launch: + sferes.launch_exp(Options.options.launch) + if Options.options.status: + sferes.status(Options.options.status) + if Options.options.time_travel: + sferes.time_travel(Options.options.time_travel) + if Options.options.kill: + sferes.kill(Options.options.kill) + if Options.options.qsub: + sferes.qsub(Options.options.qsub) + if Options.options.loadleveler: + sferes.loadleveler(Options.options.loadleveler) + + +def check(self): + os.environ["BOOST_TEST_CATCH_SYSTEM_ERRORS"]="no" + os.environ["BOOST_TEST_LOG_LEVEL"]="test_suite" + ut = unittestw.unit_test() + ut.change_to_testfile_dir = True + ut.want_to_see_test_output = True + ut.want_to_see_test_error = True + ut.run() + ut.print_results()
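Taken together, the tests/wscript and the top-level wscript above drive the whole unit-test suite: configuring with waf (for example ./waf configure, optionally with --cpp11=yes or --exp to select an experiment, both declared in set_options above) records the compiler flags for the default and debug variants, building with ./waf build compiles every tests/*/*.cpp into its own Boost.Test binary, and the check() function runs those binaries with BOOST_TEST_CATCH_SYSTEM_ERRORS disabled and the log level raised to test_suite. The exact command that triggers check() depends on the bundled waf version, so ./waf check should be treated as an assumption rather than a documented entry point.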