Skip to content

Commit 60fe2d7

Browse files
authored
Merge pull request #487 from drnikolaev/caffe-0.17
0.17.0 Release
2 parents f58deb3 + 9433cea commit 60fe2d7

File tree

183 files changed

+37048
-863
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

183 files changed

+37048
-863
lines changed

.travis.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ compiler: gcc
66

77
env:
88
global:
9-
- NUM_THREADS=4
9+
- NUM_THREADS=8
1010
matrix:
1111
# Use a build matrix to test many builds in parallel
1212
# envvar defaults:

CMakeLists.txt

+2-3
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,8 @@ endif()
1010
project(Caffe C CXX)
1111

1212
# ---[ Caffe version
13-
set(CAFFE_TARGET_VERSION "0.16.6")
14-
set(CAFFE_TARGET_SOVERSION "0.16")
13+
set(CAFFE_TARGET_VERSION "0.17.0")
14+
set(CAFFE_TARGET_SOVERSION "0.17")
1515
add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION})
1616

1717
# Skip `typedef __half half;`
@@ -53,7 +53,6 @@ caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE)
5353
caffe_option(BUILD_python_layer "Build the Caffe Python layer" ON)
5454
caffe_option(USE_LEVELDB "Build with levelDB" ON)
5555
caffe_option(USE_LMDB "Build with lmdb" ON)
56-
caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
5756
caffe_option(TEST_FP16 "Build Caffe Tests with 16 bit mode included" OFF)
5857
caffe_option(NO_NVML "Build Caffe Tests without NVML (i.e. no CPU affinity)" OFF)
5958

LICENSE

+6
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
COPYRIGHT
22

3+
All changes from Caffe SSD (https://github.com/weiliu89/caffe/tree/ssd)
4+
Copyright (c) 2015, 2016 Wei Liu (UNC Chapel Hill), Dragomir Anguelov (Zoox),
5+
Dumitru Erhan (Google), Christian Szegedy (Google), Scott Reed (UMich Ann Arbor),
6+
Cheng-Yang Fu (UNC Chapel Hill), Alexander C. Berg (UNC Chapel Hill).
7+
All rights reserved.
8+
39
All contributions by the University of California:
410
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
511
All rights reserved.

Makefile

+4-7
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ LIBRARY_NAME := $(PROJECT)$(LIBRARY_NAME_SUFFIX)
3535
LIB_BUILD_DIR := $(BUILD_DIR)/lib
3636
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a
3737
DYNAMIC_VERSION_MAJOR := 0
38-
DYNAMIC_VERSION_MINOR := 16
39-
DYNAMIC_VERSION_REVISION := 6
38+
DYNAMIC_VERSION_MINOR := 17
39+
DYNAMIC_VERSION_REVISION := 0
4040
DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
4141
DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR)
4242
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_SONAME_SHORT).$(DYNAMIC_VERSION_REVISION)
@@ -216,7 +216,7 @@ ifeq ($(USE_OPENCV), 1)
216216
LIBRARIES += opencv_core opencv_highgui opencv_imgproc
217217

218218
ifeq ($(OPENCV_VERSION), 3)
219-
LIBRARIES += opencv_imgcodecs
219+
LIBRARIES += opencv_imgcodecs opencv_videoio
220220
endif
221221

222222
endif
@@ -292,7 +292,7 @@ ifeq ($(LINUX), 1)
292292
endif
293293
# boost::thread is reasonably called boost_thread (compare OS X)
294294
# We will also explicitly add stdc++ to the link target.
295-
LIBRARIES += boost_thread stdc++
295+
LIBRARIES += boost_thread boost_regex stdc++
296296
VERSIONFLAGS += -Wl,-soname,$(DYNAMIC_SONAME_SHORT) -Wl,-rpath,$(ORIGIN)/../lib
297297
endif
298298

@@ -376,9 +376,6 @@ ifeq ($(USE_LEVELDB), 1)
376376
endif
377377
ifeq ($(USE_LMDB), 1)
378378
COMMON_FLAGS += -DUSE_LMDB
379-
ifeq ($(ALLOW_LMDB_NOLOCK), 1)
380-
COMMON_FLAGS += -DALLOW_LMDB_NOLOCK
381-
endif
382379
endif
383380

384381
# New place for HDF5

README.md

+5-3
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,19 @@ Here are the major features:
1313
* **Mixed-precision support**. It allows to store and/or compute data in either
1414
64, 32 or 16 bit formats. Precision can be defined for every layer (forward and
1515
backward passes might be different too), or it can be set for the whole Net.
16+
* **Layer-wise Adaptive Rate Control (LARC) and adaptive global gradient scaler** for better
17+
accuracy, especially in 16-bit training.
1618
* **Integration with [cuDNN](https://developer.nvidia.com/cudnn) v7**.
1719
* **Automatic selection of the best cuDNN convolution algorithm**.
1820
* **Integration with v2.2 of [NCCL library](https://github.com/NVIDIA/nccl)**
1921
for improved multi-GPU scaling.
2022
* **Optimized GPU memory management** for data and parameters storage, I/O buffers
2123
and workspace for convolutional layers.
22-
* **Parallel data parser and transformer** for improved I/O performance.
24+
* **Parallel data parser, transformer and image reader** for improved I/O performance.
2325
* **Parallel back propagation and gradient reduction** on multi-GPU systems.
2426
* **Fast solvers implementation with fused CUDA kernels for weights and history update**.
2527
* **Multi-GPU test phase** for even memory load across multiple GPUs.
26-
* **Backward compatibility with BVLC Caffe and NVCaffe 0.15**.
28+
* **Backward compatibility with BVLC Caffe and NVCaffe 0.15 and higher**.
2729
* **Extended set of optimized models** (including 16 bit floating point examples).
2830

2931

@@ -45,6 +47,6 @@ Please cite Caffe in your publications if it helps your research:
4547

4648
Libturbojpeg library is used since 0.16.5. It has a packaging bug. Please execute the following (required for Makefile, optional for CMake):
4749
```
48-
sudo apt-get install libturbojpeg libturbojpeg-dev
50+
sudo apt-get install libturbojpeg
4951
sudo ln -s /usr/lib/x86_64-linux-gnu/libturbojpeg.so.0.1.0 /usr/lib/x86_64-linux-gnu/libturbojpeg.so
5052
```

cmake/ConfigGen.cmake

-4
Original file line numberDiff line numberDiff line change
@@ -53,14 +53,10 @@ function(caffe_generate_export_configs)
5353
set(Caffe_DEFINITIONS "")
5454
if(NOT HAVE_CUDA)
5555
set(HAVE_CUDA FALSE)
56-
list(APPEND Caffe_DEFINITIONS -DCPU_ONLY)
5756
endif()
5857

5958
if(USE_LMDB)
6059
list(APPEND Caffe_DEFINITIONS -DUSE_LMDB)
61-
if (ALLOW_LMDB_NOLOCK)
62-
list(APPEND Caffe_DEFINITIONS -DALLOW_LMDB_NOLOCK)
63-
endif()
6460
endif()
6561

6662
if(USE_LEVELDB)

cmake/Dependencies.cmake

+1-6
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,6 @@ if(USE_LMDB)
3434
include_directories(SYSTEM ${LMDB_INCLUDE_DIR})
3535
list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES})
3636
add_definitions(-DUSE_LMDB)
37-
if(ALLOW_LMDB_NOLOCK)
38-
add_definitions(-DALLOW_LMDB_NOLOCK)
39-
endif()
4037
endif()
4138

4239
# ---[ LevelDB
@@ -62,14 +59,12 @@ list(APPEND Caffe_LINKER_LIBS ${JPEGTurbo_LIBRARIES})
6259
include(cmake/Cuda.cmake)
6360
if(NOT HAVE_CUDA)
6461
message(SEND_ERROR "-- CUDA is not detected by cmake. Building without it...")
65-
# TODO: remove this not cross platform define in future. Use caffe_config.h instead.
66-
add_definitions(-DCPU_ONLY)
6762
endif()
6863

6964
# ---[ OpenCV
7065
find_package(OpenCV QUIET COMPONENTS imgcodecs)
7166
if(OPENCV_IMGCODECS_FOUND)
72-
find_package(OpenCV REQUIRED COMPONENTS core imgcodecs imgproc)
67+
find_package(OpenCV REQUIRED COMPONENTS core imgcodecs highgui imgproc videoio)
7368
message(STATUS "Found OpenCV 3.x: ${OpenCV_CONFIG_PATH}")
7469
else()
7570
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)

cmake/Summary.cmake

-1
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,6 @@ function(caffe_print_configuration_summary)
115115
caffe_status(" BUILD_docs : ${BUILD_docs}")
116116
caffe_status(" USE_LEVELDB : ${USE_LEVELDB}")
117117
caffe_status(" USE_LMDB : ${USE_LMDB}")
118-
caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}")
119118
caffe_status(" TEST_FP16 : ${TEST_FP16}")
120119
caffe_status("")
121120
caffe_status("Dependencies:")

data/ILSVRC2016/README.md

+29
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
### Preparation
2+
#### ILSVRC2016
3+
We encourage you to register [ILSVRC2016](http://image-net.org/challenges/LSVRC/2016) and download the DET dataset. By default, we assume the data is stored in `$HOME/data/ILSVRC` and will call it `$ILSVRC_ROOT`.
4+
5+
#### ILSVRC2015
6+
If you choose to use ILSVRC2015 DET dataset, here are a few noticeable steps before running the following scripts:
7+
8+
1. There are a few problematic images. You can download the fixed ones [here](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_fix.tar.gz).
9+
10+
2. You should download the [val1/val2 split](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_val1_val2.tar.gz), courtesy of [Ross Girshick](http://people.eecs.berkeley.edu/~rbg), and put it in `$ILSVRC_ROOT/ImageSets/DET`.
11+
12+
### Remove an invalid file
13+
Find the invalid image file `Data/DET/val/ILSVRC2013_val_00004542.JPEG`, and remove it.
14+
15+
### Create the LMDB file.
16+
After you have downloaded the dataset, we can create the lmdb files.
17+
18+
```Shell
19+
cd $CAFFE_ROOT
20+
# Create the trainval1.txt, val2.txt, val2_name_size.txt, test.txt and test_name_size.txt in data/ILSVRC2016/
21+
python data/ILSVRC2016/create_list.py
22+
# You can modify the parameters in create_data.sh if needed.
23+
# It will create lmdb files for trainval1, val2 and test with encoded original image:
24+
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_trainval1_lmdb
25+
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_val2_lmdb
26+
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_test_lmdb
27+
# and make soft links at examples/ILSVRC2016/
28+
./data/ILSVRC2016/create_data.sh
29+
```

data/ILSVRC2016/create_data.sh

+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
2+
root_dir=$cur_dir/../..
3+
4+
cd $root_dir
5+
6+
redo=false
7+
data_root_dir="$HOME/data/ILSVRC"
8+
dataset_name="ILSVRC2016"
9+
mapfile="$root_dir/data/$dataset_name/labelmap_ilsvrc_det.prototxt"
10+
db="lmdb"
11+
min_dim=0
12+
max_dim=0
13+
width=0
14+
height=0
15+
16+
extra_cmd="--encode-type=jpg --encoded"
17+
if $redo
18+
then
19+
extra_cmd="$extra_cmd --redo"
20+
fi
21+
22+
for dataset in test
23+
do
24+
python $root_dir/scripts/create_annoset.py --anno-type="classification" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
25+
done
26+
27+
for dataset in val2 trainval1
28+
do
29+
python $root_dir/scripts/create_annoset.py --anno-type="detection" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
30+
done

data/ILSVRC2016/create_list.py

+109
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
import argparse
2+
import os
3+
from random import shuffle
4+
import shutil
5+
import subprocess
6+
import sys
7+
8+
HOMEDIR = os.path.expanduser("~")
9+
CURDIR = os.path.dirname(os.path.realpath(__file__))
10+
11+
# If true, re-create all list files.
12+
redo = False
13+
# The root directory which holds all information of the dataset.
14+
data_dir = "{}/data/ILSVRC".format(HOMEDIR)
15+
# The directory name which holds the image sets.
16+
imgset_dir = "ImageSets/DET"
17+
# The direcotry which contains the images.
18+
img_dir = "Data/DET"
19+
img_ext = "JPEG"
20+
# The directory which contains the annotations.
21+
anno_dir = "Annotations/DET"
22+
anno_ext = "xml"
23+
24+
train_list_file = "{}/trainval1.txt".format(CURDIR)
25+
val_list_file = "{}/val2.txt".format(CURDIR)
26+
val_name_size_file = "{}/val2_name_size.txt".format(CURDIR)
27+
test_list_file = "{}/test.txt".format(CURDIR)
28+
test_name_size_file = "{}/test_name_size.txt".format(CURDIR)
29+
30+
# Create training set.
31+
# We follow Ross Girschick's split in R-CNN.
32+
if redo or not os.path.exists(train_list_file):
33+
datasets = ["train", "val1"]
34+
img_files = []
35+
anno_files = []
36+
for dataset in datasets:
37+
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
38+
with open(imgset_file, "r") as f:
39+
for line in f.readlines():
40+
name = line.strip("\n").split(" ")[0]
41+
subset = name.split("/")[0].split("_")[1]
42+
anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
43+
# Ignore image if it does not have annotation. These are the negative images in ILSVRC.
44+
if not os.path.exists("{}/{}".format(data_dir, anno_file)):
45+
continue
46+
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
47+
assert os.path.exists("{}/{}".format(data_dir, img_file))
48+
img_files.append(img_file)
49+
anno_files.append(anno_file)
50+
# Shuffle the images.
51+
idx = [i for i in xrange(len(img_files))]
52+
shuffle(idx)
53+
with open(train_list_file, "w") as f:
54+
for i in idx:
55+
f.write("{} {}\n".format(img_files[i], anno_files[i]))
56+
57+
if redo or not os.path.exists(val_list_file):
58+
datasets = ["val2"]
59+
subset = "val"
60+
img_files = []
61+
anno_files = []
62+
for dataset in datasets:
63+
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
64+
with open(imgset_file, "r") as f:
65+
for line in f.readlines():
66+
name = line.strip("\n").split(" ")[0]
67+
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
68+
assert os.path.exists("{}/{}".format(data_dir, img_file))
69+
anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
70+
assert os.path.exists("{}/{}".format(data_dir, anno_file))
71+
img_files.append(img_file)
72+
anno_files.append(anno_file)
73+
with open(val_list_file, "w") as f:
74+
for i in xrange(len(img_files)):
75+
f.write("{} {}\n".format(img_files[i], anno_files[i]))
76+
77+
if redo or not os.path.exists(val_name_size_file):
78+
dataset = 'val2'
79+
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
80+
cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
81+
CURDIR, imgset_file, data_dir, val_list_file, val_name_size_file)
82+
print cmd
83+
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
84+
output = process.communicate()[0]
85+
86+
if redo or not os.path.exists(test_list_file):
87+
datasets = ["test"]
88+
subset = "test"
89+
img_files = []
90+
for dataset in datasets:
91+
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
92+
with open(imgset_file, "r") as f:
93+
for line in f.readlines():
94+
name = line.strip("\n").split(" ")[0]
95+
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
96+
assert os.path.exists("{}/{}".format(data_dir, img_file))
97+
img_files.append(img_file)
98+
with open(test_list_file, "w") as f:
99+
for i in xrange(len(img_files)):
100+
f.write("{} 0\n".format(img_files[i]))
101+
102+
if redo or not os.path.exists(test_name_size_file):
103+
dataset = 'test'
104+
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
105+
cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
106+
CURDIR, imgset_file, data_dir, test_list_file, test_name_size_file)
107+
print cmd
108+
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
109+
output = process.communicate()[0]

0 commit comments

Comments
 (0)