From ac27a626af5886ba9115ff54042fb6106e0fa465 Mon Sep 17 00:00:00 2001
From: Jie Zhang
Date: Mon, 20 Mar 2017 14:18:32 +0800
Subject: [PATCH] V0.4.0 (#27)

* update batch norm layer
* optim max pooling
* expose profiler api
* list network blobs
* remove cudnn v4 support
* test buffer io
* find cuda arch for vs2015
---
 README.md                                  |  4 +-
 cmake/Cuda.cmake                           | 22 +++++---
 include/caffe/c_api.h                      | 25 ++++++++++
 include/caffe/logging.hpp                  |  2 +-
 include/caffe/profiler.hpp                 |  2 +-
 java/.gitignore                            |  1 +
 .../java/com/luoyetx/minicaffe/Utils.java  | 47 +++++++++++++++++
 java/src/test/java/MiniCaffeTest.java      |  5 ++
 python/.gitignore                          |  1 +
 python/minicaffe/__init__.py               |  1 +
 python/minicaffe/net.py                    | 24 ++++++++-
 python/minicaffe/profiler.py               | 50 +++++++++++++++++++
 python/tests/test.py                       | 12 +++++
 src/c_api.cpp                              | 31 ++++++++++++
 src/jni/jni.c                              | 30 +++++++++++
 src/layers/cudnn/cudnn.hpp                 | 10 ----
 src/layers/cudnn/cudnn_relu_layer.cu       |  9 ----
 src/layers/cudnn/cudnn_sigmoid_layer.cu    |  9 ----
 src/layers/cudnn/cudnn_tanh_layer.cu       |  9 ----
 src/layers/pooling_layer.cpp               |  7 +--
 tests/run_net.c                            | 19 +++++++
 tests/run_net.cpp                          | 19 +++++++
 22 files changed, 286 insertions(+), 53 deletions(-)
 create mode 100644 python/minicaffe/profiler.py

diff --git a/README.md b/README.md
index 9c066a2..9d60ada 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ If you don't use Ubuntu, then you may need to install OpenBLAS and protobuf thro
 
 Mini-Caffe now can be cross compiled for Android platform, checkout the document [here](android).
 
-### With CUDA and CUDNN
+### With CUDA and CUDNN support
 
 Install [CUDA8](https://developer.nvidia.com/cuda-downloads) and [cuDNN5.1](https://developer.nvidia.com/cudnn) in your system, then we can compile Mini-Caffe with GPU support. Run CMake command below.
 
@@ -89,4 +89,4 @@ To use Mini-Caffe as a library, you may refer to [example](example).
 
 ### How to profile your network
 
-The Profiler in Mini-Caffe can help you profile your network performance, see docs [here](profile.md)
+The Profiler in Mini-Caffe can help you profile your network performance; see the docs [here](profile.md).
diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake
index 02d057e..db61ef7 100644
--- a/cmake/Cuda.cmake
+++ b/cmake/Cuda.cmake
@@ -42,15 +42,21 @@ function(caffe_detect_installed_gpus out_variable)
         "  return 0;\n"
         "}\n")
     if(MSVC)
-      # Add directory of "cl.exe" to system path, otherwise "nvcc --run" will fail with "Cannot find compiler 'cl.exe' in PATH"
-      get_filename_component(CL_DIR ${CMAKE_C_COMPILER} DIRECTORY)
-      set(ENV{PATH} "$ENV{PATH};${CL_DIR}")
+      # find vcvarsall.bat and run it to set up the MSVC build environment
+      get_filename_component(MY_COMPILER_DIR ${CMAKE_CXX_COMPILER} DIRECTORY)
+      find_file(MY_VCVARSALL_BAT vcvarsall.bat "${MY_COMPILER_DIR}/.." "${MY_COMPILER_DIR}/../..")
+      execute_process(COMMAND ${MY_VCVARSALL_BAT} && ${CUDA_NVCC_EXECUTABLE} -arch sm_30 --run ${__cufile}
+                      WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
+                      RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
+                      ERROR_QUIET
+                      OUTPUT_STRIP_TRAILING_WHITESPACE)
+    else()
+      execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} -arch sm_30 --run ${__cufile}
+                      WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
+                      RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
+                      ERROR_QUIET
+                      OUTPUT_STRIP_TRAILING_WHITESPACE)
     endif()
-    execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${__cufile}"
-                    WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
-                    RESULT_VARIABLE __nvcc_res OUTPUT_VARIABLE __nvcc_out
-                    ERROR_QUIET
-                    OUTPUT_STRIP_TRAILING_WHITESPACE)
 
     if(__nvcc_res EQUAL 0)
       # nvcc outputs text containing line breaks when building with MSVC.
diff --git a/include/caffe/c_api.h b/include/caffe/c_api.h
index 18530fa..76ca70d 100644
--- a/include/caffe/c_api.h
+++ b/include/caffe/c_api.h
@@ -105,6 +105,31 @@ CAFFE_API int CaffeNetListParam(NetHandle net, const char ***names,
                                 BlobHandle **params);
 
+// Profiler API. Do not enable the Profiler in a multi-threaded environment.
+
+/*!
+ * \brief enable profiler
+ */
+CAFFE_API int CaffeProfilerEnable();
+/*!
+ * \brief disable profiler
+ */
+CAFFE_API int CaffeProfilerDisable();
+/*!
+ * \brief open a scope on the profiler
+ * \param name scope name
+ */
+CAFFE_API int CaffeProfilerScopeStart(const char *name);
+/*!
+ * \brief close a scope
+ */
+CAFFE_API int CaffeProfilerScopeEnd();
+/*!
+ * \brief dump profile data to file
+ * \param fn file name or path
+ */
+CAFFE_API int CaffeProfilerDump(const char *fn);
+
 // Helper
 
 /*!
diff --git a/include/caffe/logging.hpp b/include/caffe/logging.hpp
index ec6d13c..d98015a 100644
--- a/include/caffe/logging.hpp
+++ b/include/caffe/logging.hpp
@@ -208,7 +208,7 @@ class LogMessageFatal {
 #else
   ~LogMessageFatal() noexcept(false) {
 #endif
-    // LOG(ERROR) << log_stream_.str();
+    LOG(ERROR) << log_stream_.str();
     throw Error(log_stream_.str());
   }
   std::ostringstream &stream() { return log_stream_; }
diff --git a/include/caffe/profiler.hpp b/include/caffe/profiler.hpp
index 370d579..60e1b40 100644
--- a/include/caffe/profiler.hpp
+++ b/include/caffe/profiler.hpp
@@ -9,7 +9,7 @@ namespace caffe {
 
 /*!
- * \brief Profiler for Caffe
+ * \brief Profiler for Caffe. Do not enable the Profiler in a multi-threaded environment.
  * This class is used to profile a range of source code as a scope.
  * The basic usage is like below.
  *
diff --git a/java/.gitignore b/java/.gitignore
index 4f3b431..06bd00e 100644
--- a/java/.gitignore
+++ b/java/.gitignore
@@ -14,3 +14,4 @@ gradle-app.setting
 # gradle/wrapper/gradle-wrapper.properties
 
 *.log
+*.json
diff --git a/java/src/main/java/com/luoyetx/minicaffe/Utils.java b/java/src/main/java/com/luoyetx/minicaffe/Utils.java
index 5c2b599..064668c 100644
--- a/java/src/main/java/com/luoyetx/minicaffe/Utils.java
+++ b/java/src/main/java/com/luoyetx/minicaffe/Utils.java
@@ -28,9 +28,56 @@ public static void SetCaffeMode(int mode, int device) {
             throw new RuntimeException(GetLastError());
         }
     }
+    /**
+     * enable profiler
+     */
+    public static void EnableProfiler() {
+        if (jniProfilerEnable() != 0) {
+            throw new RuntimeException(GetLastError());
+        }
+    }
+    /**
+     * disable profiler
+     */
+    public static void DisableProfiler() {
+        if (jniProfilerDisable() != 0) {
+            throw new RuntimeException(GetLastError());
+        }
+    }
+    /**
+     * open a scope on the profiler
+     * @param name scope name
+     */
+    public static void OpenScope(String name) {
+        if (jniProfilerScopeStart(name) != 0) {
+            throw new RuntimeException(GetLastError());
+        }
+    }
+    /**
+     * close a scope
+     */
+    public static void CloseScope() {
+        if (jniProfilerScopeEnd() != 0) {
+            throw new RuntimeException(GetLastError());
+        }
+    }
+    /**
+     * dump profiler data to file
+     * @param fn file path
+     */
+    public static void DumpProfile(String fn) {
+        if (jniProfilerDump(fn) != 0) {
+            throw new RuntimeException(GetLastError());
+        }
+    }
     private static native String jniGetLastError();
     private static native int jniGPUAvailable();
     private static native int jniSetMode(int mode, int device);
+    private static native int jniProfilerEnable();
+    private static native int jniProfilerDisable();
+    private static native int jniProfilerScopeStart(String name);
+    private static native int jniProfilerScopeEnd();
+    private static native int jniProfilerDump(String fn);
 
     static {
         System.loadLibrary("caffe");
diff --git a/java/src/test/java/MiniCaffeTest.java b/java/src/test/java/MiniCaffeTest.java
index 5fd54ae..5a37c47 100644
--- a/java/src/test/java/MiniCaffeTest.java
+++ b/java/src/test/java/MiniCaffeTest.java
@@ -16,10 +16,15 @@ public class MiniCaffeTest {
         } else {
             System.out.println("Use CPU to run model");
         }
+        Utils.EnableProfiler();
         System.out.println("Create NIN");
         Net net = new Net("../build/model/nin.prototxt", "../build/model/nin.caffemodel");
+        Utils.OpenScope("nin");
         testForward(net);
+        Utils.CloseScope();
+        Utils.DisableProfiler();
+        Utils.DumpProfile("nin-profile.json");
 
         // test create from buffer
         System.out.println("Create ResNet from buffer");
         try {
diff --git a/python/.gitignore b/python/.gitignore
index 0ce91dd..c8a989e 100644
--- a/python/.gitignore
+++ b/python/.gitignore
@@ -3,3 +3,4 @@ dist
 build
 
 *.pyc
+*.json
diff --git a/python/minicaffe/__init__.py b/python/minicaffe/__init__.py
index 4d83b23..413d1f6 100644
--- a/python/minicaffe/__init__.py
+++ b/python/minicaffe/__init__.py
@@ -3,5 +3,6 @@
 from .net import Net
 from .base import check_gpu_available, set_runtime_mode
 from .craft import LayerCrafter
+from .profiler import Profiler
 
 __version__ = '0.4.0'
diff --git a/python/minicaffe/net.py b/python/minicaffe/net.py
index 6b4c465..044d2ff 100644
--- a/python/minicaffe/net.py
+++ b/python/minicaffe/net.py
@@ -2,8 +2,8 @@
 # pylint: disable=invalid-name
 """Net represents caffe::Net in C++"""
 from __future__ import absolute_import
-import ctypes
 from collections import defaultdict
+import ctypes
 from .base import LIB
 from .base import c_str, py_str, check_call
 from .base import NetHandle, BlobHandle
@@ -51,6 +51,28 @@ def get_blob(self, name):
         check_call(LIB.CaffeNetGetBlob(self.handle, c_str(name),
                                        ctypes.byref(handle)))
         return Blob(handle)
 
+    @property
+    def blobs(self):
+        """return network internal blobs
+
+        Returns
+        -------
+        blobs: dict(name: Blob)
+            network internal blobs with their names
+        """
+        ctypes_n = ctypes.c_int32()
+        ctypes_names = ctypes.POINTER(ctypes.c_char_p)()
+        ctypes_blobs = ctypes.POINTER(BlobHandle)()
+        check_call(LIB.CaffeNetListBlob(self.handle, ctypes.byref(ctypes_n),
+                                        ctypes.byref(ctypes_names),
+                                        ctypes.byref(ctypes_blobs)))
+        blobs = dict()
+        for i in range(ctypes_n.value):
+            name = py_str(ctypes_names[i])
+            blob = Blob(BlobHandle(ctypes_blobs[i]))
+            blobs[name] = blob
+        return blobs
+
     @property
     def params(self):
         """return network params
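The blobs property above drives CaffeNetListBlob through ctypes. For reference, the same enumeration from C might look like the sketch below (not part of the patch). It assumes CaffeNetListBlob is declared analogously to CaffeNetListParam, i.e. (NetHandle, int*, const char***, BlobHandle**), which is what the ctypes call implies; the returned arrays are not freed here because they appear to be backed by the library-side BlobsEntry storage seen in src/c_api.cpp.

    /* Sketch: list a network's internal blobs via the C API.
     * Signature and ownership are assumptions inferred from net.py/c_api.cpp. */
    #include <stdio.h>
    #include <caffe/c_api.h>

    int list_blobs(NetHandle net) {
      int n = 0;
      const char **names = NULL;
      BlobHandle *blobs = NULL;
      if (CaffeNetListBlob(net, &n, &names, &blobs) != 0) {
        printf("%s\n", CaffeGetLastError());   /* last error message */
        return -1;
      }
      for (int i = 0; i < n; ++i) {
        printf("blob %d: %s\n", i, names[i]);  /* blobs[i] can be queried via the Blob C API */
      }
      return 0;
    }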
-  CUDNN_CHECK(cudnnActivationForward_v4(this->handle_,
-      activ_desc_,
-      cudnn::dataType::one,
-      this->bottom_desc_, bottom_data,
-      cudnn::dataType::zero,
-      this->top_desc_, top_data));
-#endif
 }
 
 }  // namespace caffe
diff --git a/src/layers/cudnn/cudnn_sigmoid_layer.cu b/src/layers/cudnn/cudnn_sigmoid_layer.cu
index b0c4bab..e96b217 100644
--- a/src/layers/cudnn/cudnn_sigmoid_layer.cu
+++ b/src/layers/cudnn/cudnn_sigmoid_layer.cu
@@ -8,21 +8,12 @@ void CuDNNSigmoidLayer::Forward_gpu(const vector& bottom,
     const vector& top) {
   const real_t* bottom_data = bottom[0]->gpu_data();
   real_t* top_data = top[0]->mutable_gpu_data();
-#if CUDNN_VERSION_MIN(5, 0, 0)
   CUDNN_CHECK(cudnnActivationForward(this->handle_,
       activ_desc_,
       cudnn::dataType::one,
       this->bottom_desc_, bottom_data,
       cudnn::dataType::zero,
       this->top_desc_, top_data));
-#else
-  CUDNN_CHECK(cudnnActivationForward_v4(this->handle_,
-      activ_desc_,
-      cudnn::dataType::one,
-      this->bottom_desc_, bottom_data,
-      cudnn::dataType::zero,
-      this->top_desc_, top_data));
-#endif
 }
 
 }  // namespace caffe
diff --git a/src/layers/cudnn/cudnn_tanh_layer.cu b/src/layers/cudnn/cudnn_tanh_layer.cu
index 8c2dd4a..68d76da 100644
--- a/src/layers/cudnn/cudnn_tanh_layer.cu
+++ b/src/layers/cudnn/cudnn_tanh_layer.cu
@@ -10,21 +10,12 @@ void CuDNNTanHLayer::Forward_gpu(const vector& bottom,
     const vector& top) {
   const real_t* bottom_data = bottom[0]->gpu_data();
   real_t* top_data = top[0]->mutable_gpu_data();
-#if CUDNN_VERSION_MIN(5, 0, 0)
   CUDNN_CHECK(cudnnActivationForward(this->handle_,
       activ_desc_,
       cudnn::dataType::one,
       this->bottom_desc_, bottom_data,
       cudnn::dataType::zero,
       this->top_desc_, top_data));
-#else
-  CUDNN_CHECK(cudnnActivationForward_v4(this->handle_,
-      activ_desc_,
-      cudnn::dataType::one,
-      this->bottom_desc_, bottom_data,
-      cudnn::dataType::zero,
-      this->top_desc_, top_data));
-#endif
 }
 
 }  // namespace caffe
diff --git a/src/layers/pooling_layer.cpp b/src/layers/pooling_layer.cpp
index 6bc4c70..0bfa091 100644
--- a/src/layers/pooling_layer.cpp
+++ b/src/layers/pooling_layer.cpp
@@ -125,7 +125,6 @@ void PoolingLayer::Forward_cpu(const vector& bottom,
   // loop to save time, although this results in more code.
   switch (this->layer_param_.pooling_param().pool()) {
   case PoolingParameter_PoolMethod_MAX:
-    caffe_set(top_count, static_cast(-FLT_MAX), top_data);
     // The main loop
     for (int n = 0; n < bottom[0]->num(); ++n) {
       for (int c = 0; c < channels_; ++c) {
@@ -137,13 +136,15 @@ void PoolingLayer::Forward_cpu(const vector& bottom,
             int wend = min(wstart + kernel_w_, width_);
             hstart = max(hstart, 0);
             wstart = max(wstart, 0);
-            const int pool_index = ph * pooled_width_ + pw;
+            real_t top_val = -FLT_MAX;
             for (int h = hstart; h < hend; ++h) {
               for (int w = wstart; w < wend; ++w) {
                 const int index = h * width_ + w;
-                top_data[pool_index] = max(top_data[pool_index], bottom_data[index]);
+                top_val = max(top_val, bottom_data[index]);
               }
             }
+            const int pool_index = ph * pooled_width_ + pw;
+            top_data[pool_index] = top_val;
           }
         }
         // compute offset
diff --git a/tests/run_net.c b/tests/run_net.c
index 44e2ed4..9ddeb05 100644
--- a/tests/run_net.c
+++ b/tests/run_net.c
@@ -71,5 +71,24 @@ int main(int argc, char *argv[]) {
   CHECK(CaffeNetCreate("no-such-prototxt", "no-such-caffemodel", &net) == -1);
   printf("%s\n", CaffeGetLastError());
 
+  // create network from buffer
+  FILE *fin = fopen("model/resnet.prototxt", "r");
+  fseek(fin, 0, SEEK_END);
+  long prototxt_size = ftell(fin);
+  fseek(fin, 0, SEEK_SET);
+  char *prototxt = malloc(prototxt_size);
+  fread(prototxt, 1, prototxt_size, fin);
+  fclose(fin);
+  fin = fopen("model/resnet.caffemodel", "rb");
+  fseek(fin, 0, SEEK_END);
+  long caffemodel_size = ftell(fin);
+  fseek(fin, 0, SEEK_SET);
+  char *caffemodel = malloc(caffemodel_size);
+  fread(caffemodel, 1, caffemodel_size, fin);
+  fclose(fin);
+  CHECK_SUCCESS(CaffeNetCreateFromBuffer(prototxt, prototxt_size,
+                                         caffemodel, caffemodel_size,
+                                         &net));
+  CHECK_SUCCESS(CaffeNetDestroy(net));
   return 0;
 }
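The buffer-IO test above reads both model files without checking the fopen/fread results, and opens the prototxt in text mode ("r"), where ftell may overestimate the byte count fread actually returns on Windows. A more defensive version of the same helper logic could look like the sketch below; it is not part of the patch, read_file is an illustrative helper, and CaffeNetCreateFromBuffer is the call the test exercises.

    /* Illustrative helper: read a whole file into a malloc'd buffer with
     * error checks; returns NULL on failure. Binary mode keeps the ftell
     * size consistent with what fread actually returns. */
    #include <stdio.h>
    #include <stdlib.h>

    static char *read_file(const char *path, long *size) {
      FILE *fp = fopen(path, "rb");
      if (fp == NULL) return NULL;
      fseek(fp, 0, SEEK_END);
      *size = ftell(fp);
      fseek(fp, 0, SEEK_SET);
      char *buf = malloc(*size);
      if (buf == NULL || fread(buf, 1, *size, fp) != (size_t)*size) {
        free(buf);
        fclose(fp);
        return NULL;
      }
      fclose(fp);
      return buf;
    }

The two buffers would then be handed to CaffeNetCreateFromBuffer exactly as in the test, and can presumably be freed once the net has been created, since the parameters are parsed during creation.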
diff --git a/tests/run_net.cpp b/tests/run_net.cpp
index b6be779..35886ea 100644
--- a/tests/run_net.cpp
+++ b/tests/run_net.cpp
@@ -2,6 +2,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #include
 #include
 
@@ -212,5 +214,22 @@ int main(int argc, char *argv[]) {
   // dump profile data
   profiler->TurnOFF();
   profiler->DumpProfile("profile.json");
+
+  // test IO
+  ifstream fin("model/resnet.prototxt");
+  stringstream buffer;
+  buffer << fin.rdbuf();
+  fin.close();
+  string prototxt = buffer.str();
+  shared_ptr network_param = ReadTextNetParameterFromBuffer(prototxt.c_str(), prototxt.length());
+  fin.open("model/resnet.caffemodel", ios::binary);
+  buffer.str("");
+  buffer.clear();
+  buffer << fin.rdbuf();
+  fin.close();
+  string caffemodel = buffer.str();
+  shared_ptr model_param = ReadBinaryNetParameterFromBuffer(caffemodel.c_str(), caffemodel.length());
+  Net net(*network_param);
+  net.CopyTrainedLayersFrom(*model_param);
   return 0;
 }