1. Preface
While Caffe has not yet grown too large, it is worth taking the time to work through its code.
(1) On a first reading of the Caffe source, what stands out is the heavy use of Google's check/assertion macros (glog's CHECK_* in the library code, gtest in the tests); they keep the code compact and easy to follow.
(2) Caffe's documentation is generated with doxygen, which is reflected in the comment style; this is worth borrowing for one's own projects.
2. Background:
(1) The explicit keyword forbids implicit conversions through a single-argument constructor (a self-contained sketch follows after this list).
For example, given a class B with an explicit constructor explicit B(const A& a);:
A a;
B b = a;   // compile error: copy-initialization needs an implicit conversion
B b2(a);   // OK: direct initialization calls the constructor explicitly
(2) For details on how const is used, see:
http://blog.csdn.net/Eric_Jo/article/details/4138548
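Below is a minimal, self-contained sketch of the explicit rule; the classes A and B are made up purely for illustration:

#include <iostream>

struct A { int value; };

struct B {
  explicit B(const A& a) : value(a.value) {}  // explicit: no implicit A -> B conversion
  int value;
};

int main() {
  A a = {42};
  // B b = a;   // error: copy-initialization would require an implicit conversion
  B b(a);       // OK: direct initialization calls the explicit constructor
  const int n = b.value;  // const: n cannot be modified after initialization
  std::cout << n << std::endl;
  return 0;
}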
3. Walkthrough
The Blob class:
After reading the code, a Blob actually holds three kinds of data:
(1) data, the values used in the forward pass;
(2) diff, the gradients used in the backward pass;
(3) shape, the metadata describing how data and diff are laid out.
The class methods are organized around these three kinds of data; a small usage sketch is given below.
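To make the interface concrete, here is a minimal usage sketch (the shapes and values are hypothetical; it assumes a built Caffe and its float instantiation of Blob):

#include <vector>
#include "caffe/blob.hpp"

using caffe::Blob;

int main() {
  Blob<float> blob(2, 3, 4, 5);            // (num, channels, height, width)
  float* data = blob.mutable_cpu_data();   // forward values
  float* diff = blob.mutable_cpu_diff();   // gradients
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = 1.0f;
    diff[i] = 0.1f;
  }
  blob.Update();                           // data := data - diff
  std::vector<int> new_shape(2);
  new_shape[0] = 6;
  new_shape[1] = 20;
  blob.Reshape(new_shape);                 // same count, so no reallocation
  return 0;
}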
Below are my annotated listings.
First, the annotated blob.h:
- #ifndef CAFFE_BLOB_HPP_
- #define CAFFE_BLOB_HPP_
-
- #include <algorithm>
- #include <string>
- #include <vector>
-
- #include "caffe/common.hpp"
- #include "caffe/proto/caffe.pb.h"
- #include "caffe/syncedmem.hpp"
- #include "caffe/util/math_functions.hpp"
-
- const int kMaxBlobAxes = 32;
-
- namespace caffe {
-
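- // A Blob is Caffe's basic data container: a thin wrapper around two
- // SyncedMemory buffers (data for forward values, diff for gradients) plus
- // the shape metadata that describes how both buffers are laid out.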
- template <typename Dtype>
- class Blob {
- public:
-
- Blob()
- : data_(), diff_(), count_(0), capacity_(0) {}
-
-
- explicit Blob(const int num, const int channels, const int height,
- const int width);
- explicit Blob(const vector<int>& shape);
-
-
-
- void Reshape(const int num, const int channels, const int height,
- const int width);
-
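- // Reshape changes the blob's dimensions; new memory is allocated only when
- // the requested count exceeds the current capacity (see blob.cpp).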
- void Reshape(const vector<int>& shape);
- void Reshape(const BlobShape& shape);
- void ReshapeLike(const Blob& other);
-
- inline string shape_string() const {
- ostringstream stream;
- for (int i = 0; i < shape_.size(); ++i) {
- stream << shape_[i] << " ";
- }
- stream << "(" << count_ << ")";
- return stream.str();
- }
- inline const vector<int>& shape() const { return shape_; }
-
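- // shape(index) accepts negative indices, counted from the last axis as in Python.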
- inline int shape(int index) const {
- return shape_[CanonicalAxisIndex(index)];
- }
-
- inline int num_axes() const { return shape_.size(); }
-
- inline int count() const { return count_; }
-
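- // count(start_axis, end_axis) returns the product of the dimensions in the
- // half-open axis range [start_axis, end_axis).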
- inline int count(int start_axis, int end_axis) const {
-
- CHECK_LE(start_axis, end_axis);
- CHECK_GE(start_axis, 0);
- CHECK_GE(end_axis, 0);
- CHECK_LE(start_axis, num_axes());
- CHECK_LE(end_axis, num_axes());
- int count = 1;
- for (int i = start_axis; i < end_axis; ++i) {
- count *= shape(i);
- }
- return count;
- }
-
- inline int count(int start_axis) const {
- return count(start_axis, num_axes());
- }
-
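- // CanonicalAxisIndex maps a possibly negative axis index into the range
- // [0, num_axes()), checking that it is valid.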
- inline int CanonicalAxisIndex(int axis_index) const {
-
- CHECK_GE(axis_index, -num_axes())
- << "axis " << axis_index << " out of range for " << num_axes()
- << "-D Blob with shape " << shape_string();
- CHECK_LT(axis_index, num_axes())
- << "axis " << axis_index << " out of range for " << num_axes()
- << "-D Blob with shape " << shape_string();
- if (axis_index < 0) {
- return axis_index + num_axes();
- }
- return axis_index;
- }
-
-
- inline int num() const { return LegacyShape(0); }
-
- inline int channels() const { return LegacyShape(1); }
-
- inline int height() const { return LegacyShape(2); }
-
- inline int width() const { return LegacyShape(3); }
- inline int LegacyShape(int index) const {
- CHECK_LE(num_axes(), 4)
- << "Cannot use legacy accessors on Blobs with > 4 axes.";
- CHECK_LT(index, 4);
- CHECK_GE(index, -4);
- if (index >= num_axes() || index < -num_axes()) {
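- // Out-of-range axes on a legacy (< 4-axis) blob are treated as singleton
- // dimensions of size 1.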
- return 1;
- }
- return shape(index);
- }
-
- inline int offset(const int n, const int c = 0, const int h = 0,
- const int w = 0) const {
- CHECK_GE(n, 0);
- CHECK_LE(n, num());
- CHECK_GE(channels(), 0);
- CHECK_LE(c, channels());
- CHECK_GE(height(), 0);
- CHECK_LE(h, height());
- CHECK_GE(width(), 0);
- CHECK_LE(w, width());
- return ((n * channels() + c) * height() + h) * width() + w;
- }
-
- inline int offset(const vector<int>& indices) const {
- CHECK_LE(indices.size(), num_axes());
- int offset = 0;
- for (int i = 0; i < num_axes(); ++i) {
- offset *= shape(i);
- if (indices.size() > i) {
- CHECK_GE(indices[i], 0);
- CHECK_LT(indices[i], shape(i));
- offset += indices[i];
- }
- }
- return offset;
- }
-
-
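- // CopyFrom copies the data (or, with copy_diff = true, the diff) from another
- // blob; with reshape = true this blob is first reshaped to match the source.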
- void CopyFrom(const Blob& source, bool copy_diff = false,
- bool reshape = false);
-
- inline Dtype data_at(const int n, const int c, const int h,
- const int w) const {
- return cpu_data()[offset(n, c, h, w)];
- }
-
- inline Dtype diff_at(const int n, const int c, const int h,
- const int w) const {
- return cpu_diff()[offset(n, c, h, w)];
- }
-
- inline Dtype data_at(const vector<int>& index) const {
- return cpu_data()[offset(index)];
- }
-
- inline Dtype diff_at(const vector<int>& index) const {
- return cpu_diff()[offset(index)];
- }
-
- inline const shared_ptr<SyncedMemory>& data() const {
- CHECK(data_);
- return data_;
- }
-
- inline const shared_ptr<SyncedMemory>& diff() const {
- CHECK(diff_);
- return diff_;
- }
-
-
- const Dtype* cpu_data() const;
- void set_cpu_data(Dtype* data);
- const int* gpu_shape() const;
- const Dtype* gpu_data() const;
- const Dtype* cpu_diff() const;
- const Dtype* gpu_diff() const;
- Dtype* mutable_cpu_data();
- Dtype* mutable_gpu_data();
- Dtype* mutable_cpu_diff();
- Dtype* mutable_gpu_diff();
-
- // Update() computes data := data + (-1) * diff, i.e. a BLAS axpy (Y = alpha*X + Y with alpha = -1).
- void Update();
-
- void FromProto(const BlobProto& proto, bool reshape = true);
-
- void ToProto(BlobProto* proto, bool write_diff = false) const;
-
-
- Dtype asum_data() const;
-
- Dtype asum_diff() const;
-
- Dtype sumsq_data() const;
-
- Dtype sumsq_diff() const;
-
-
- void scale_data(Dtype scale_factor);
-
- void scale_diff(Dtype scale_factor);
-
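- // ShareData points this blob's data_ at other's SyncedMemory, so the two
- // blobs share the same underlying forward buffer.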
- void ShareData(const Blob& other);
-
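- // ShareDiff does the same for the gradient buffer diff_.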
- void ShareDiff(const Blob& other);
-
- bool ShapeEquals(const BlobProto& other);
-
- protected:
-
- shared_ptr<SyncedMemory> data_;
-
- shared_ptr<SyncedMemory> diff_;
-
- shared_ptr<SyncedMemory> shape_data_;
-
- vector<int> shape_;
-
- int count_;
-
- int capacity_;
-
- DISABLE_COPY_AND_ASSIGN(Blob);
- };
-
- }  // namespace caffe
-
- #endif // CAFFE_BLOB_HPP_
Next comes the corresponding implementation.
The annotated blob.cpp:
- #include <climits>
- #include <vector>
-
- #include "caffe/blob.hpp"
- #include "caffe/common.hpp"
- #include "caffe/syncedmem.hpp"
- #include "caffe/util/math_functions.hpp"
-
- namespace caffe {
-
-
-
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
- const int width) {
- vector<int> shape(4);
- shape[0] = num;
- shape[1] = channels;
- shape[2] = height;
- shape[3] = width;
- Reshape(shape);
- }
-
-
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const vector<int>& shape) {
- CHECK_LE(shape.size(), kMaxBlobAxes);
- count_ = 1;
- shape_.resize(shape.size());
- if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
- shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
- }
- int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
- for (int i = 0; i < shape.size(); ++i) {
-
- CHECK_GE(shape[i], 0);
- CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
-
- count_ *= shape[i];
-
- shape_[i] = shape[i];
- shape_data[i] = shape[i];
- }
-
- if (count_ > capacity_) {
- capacity_ = count_;
-
- data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
- diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
- }
- }
-
-
-
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const BlobShape& shape) {
-
- CHECK_LE(shape.dim_size(), kMaxBlobAxes);
-
- vector<int> shape_vec(shape.dim_size());
- for (int i = 0; i < shape.dim_size(); ++i) {
- shape_vec[i] = shape.dim(i);
- }
-
- Reshape(shape_vec);
- }
-
- template <typename Dtype>
- void Blob<Dtype>::ReshapeLike(const Blob& other) {
- Reshape(other.shape());
- }
-
- template <typename Dtype>
- Blob<Dtype>::Blob(const int num, const int channels, const int height,
- const int width)
- // capacity_ must be initialized before Reshape() is called, since Reshape() reads it.
- : capacity_(0) {
- Reshape(num, channels, height, width);
- }
-
- template <typename Dtype>
- Blob<Dtype>::Blob(const vector<int>& shape)
- : capacity_(0) {
- Reshape(shape);
- }
-
- template <typename Dtype>
- const int* Blob<Dtype>::gpu_shape() const {
- CHECK(shape_data_);
- return (const int*)shape_data_->gpu_data();
- }
-
- template <typename Dtype>
- const Dtype* Blob<Dtype>::cpu_data() const {
- CHECK(data_);
- // data_ is a shared_ptr<SyncedMemory>; return its read-only CPU buffer.
- return (const Dtype*)data_->cpu_data();
- }
-
- template <typename Dtype>
- void Blob<Dtype>::set_cpu_data(Dtype* data) {
- CHECK(data);
- data_->set_cpu_data(data);
- }
-
- template <typename Dtype>
- const Dtype* Blob<Dtype>::gpu_data() const {
- CHECK(data_);
- return (const Dtype*)data_->gpu_data();
- }
-
- template <typename Dtype>
- const Dtype* Blob<Dtype>::cpu_diff() const {
- CHECK(diff_);
- return (const Dtype*)diff_->cpu_data();
- }
-
- template <typename Dtype>
- const Dtype* Blob<Dtype>::gpu_diff() const {
- CHECK(diff_);
- return (const Dtype*)diff_->gpu_data();
- }
-
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_cpu_data() {
- CHECK(data_);
- return static_cast<Dtype*>(data_->mutable_cpu_data());
- }
-
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_gpu_data() {
- CHECK(data_);
- return static_cast<Dtype*>(data_->mutable_gpu_data());
- }
-
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_cpu_diff() {
- CHECK(diff_);
- return static_cast<Dtype*>(diff_->mutable_cpu_data());
- }
-
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_gpu_diff() {
- CHECK(diff_);
- return static_cast<Dtype*>(diff_->mutable_gpu_data());
- }
-
-
- template <typename Dtype>
- void Blob<Dtype>::ShareData(const Blob& other) {
- CHECK_EQ(count_, other.count());
- data_ = other.data();
- }
-
- template <typename Dtype>
- void Blob<Dtype>::ShareDiff(const Blob& other) {
- CHECK_EQ(count_, other.count());
- diff_ = other.diff();
- }
-
-
-
-
- template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
- template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }
-
-
-
- template <typename Dtype>
- void Blob<Dtype>::Update() {
-
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- // Data currently lives on the CPU: data := data - diff via a CPU axpy.
- caffe_axpy<Dtype>(count_, Dtype(-1),
- static_cast<const Dtype*>(diff_->cpu_data()),
- static_cast<Dtype*>(data_->mutable_cpu_data()));
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
-
-
- caffe_gpu_axpy(count_, Dtype(-1),
- static_cast<const Dtype*>(diff_->gpu_data()),
- static_cast<Dtype*>(data_->mutable_gpu_data()));
- #else
- NO_GPU;
- #endif
- break;
- default:
- LOG(FATAL) << "Syncedmem not initialized.";
- }
- }
-
- template <> unsigned int Blob<unsigned int>::asum_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
- template <> int Blob<int>::asum_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
- template <typename Dtype>
- Dtype Blob<Dtype>::asum_data() const {
- if (!data_) { return 0; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- return caffe_cpu_asum(count_, cpu_data());
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- {
- Dtype asum;
- caffe_gpu_asum(count_, gpu_data(), &asum);
- return asum;
- }
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return 0;
- }
-
- template <> unsigned int Blob<unsigned int>::asum_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
- template <> int Blob<int>::asum_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
-
- template <typename Dtype>
- Dtype Blob<Dtype>::asum_diff() const {
- if (!diff_) { return 0; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- return caffe_cpu_asum(count_, cpu_diff());
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- {
- Dtype asum;
- caffe_gpu_asum(count_, gpu_diff(), &asum);
- return asum;
- }
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
- }
- return 0;
- }
-
- template <> unsigned int Blob<unsigned int>::sumsq_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
- template <> int Blob<int>::sumsq_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
-
- template <typename Dtype>
- Dtype Blob<Dtype>::sumsq_data() const {
- Dtype sumsq;
- const Dtype* data;
- if (!data_) { return 0; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- data = cpu_data();
- sumsq = caffe_cpu_dot(count_, data, data);
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- data = gpu_data();
- caffe_gpu_dot(count_, data, data, &sumsq);
- #else
- NO_GPU;
- #endif
- break;
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return sumsq;
- }
-
- template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
- template <> int Blob<int>::sumsq_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
-
-
- template <typename Dtype>
- Dtype Blob<Dtype>::sumsq_diff() const {
- Dtype sumsq;
- const Dtype* diff;
- if (!diff_) { return 0; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- diff = cpu_diff();
- sumsq = caffe_cpu_dot(count_, diff, diff);
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- diff = gpu_diff();
- caffe_gpu_dot(count_, diff, diff, &sumsq);
- break;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return sumsq;
- }
-
- template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
- NOT_IMPLEMENTED;
- }
-
- template <> void Blob<int>::scale_data(int scale_factor) {
- NOT_IMPLEMENTED;
- }
-
-
- template <typename Dtype>
- void Blob<Dtype>::scale_data(Dtype scale_factor) {
- Dtype* data;
- if (!data_) { return; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- data = mutable_cpu_data();
- caffe_scal(count_, scale_factor, data);
- return;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- data = mutable_gpu_data();
- caffe_gpu_scal(count_, scale_factor, data);
- return;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- }
-
- template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
- NOT_IMPLEMENTED;
- }
-
- template <> void Blob<int>::scale_diff(int scale_factor) {
- NOT_IMPLEMENTED;
- }
-
- template <typename Dtype>
- void Blob<Dtype>::scale_diff(Dtype scale_factor) {
- Dtype* diff;
- if (!diff_) { return; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- diff = mutable_cpu_diff();
- caffe_scal(count_, scale_factor, diff);
- return;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- diff = mutable_gpu_diff();
- caffe_gpu_scal(count_, scale_factor, diff);
- return;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
- }
- }
-
-
- template <typename Dtype>
- bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
-
- if (other.has_num() || other.has_channels() ||
- other.has_height() || other.has_width()) {
-
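- // Legacy protos store num/channels/height/width instead of a shape vector;
- // compare them against the last four (right-aligned) axes of this blob.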
- return shape_.size() <= 4 &&
- LegacyShape(-4) == other.num() &&
- LegacyShape(-3) == other.channels() &&
- LegacyShape(-2) == other.height() &&
- LegacyShape(-1) == other.width();
- }
-
- vector<int> other_shape(other.shape().dim_size());
- for (int i = 0; i < other.shape().dim_size(); ++i) {
- other_shape[i] = other.shape().dim(i);
- }
- return shape_ == other_shape;
- }
-
-
- template <typename Dtype>
- void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
- if (source.count() != count_ || source.shape() != shape_) {
- if (reshape) {
- ReshapeLike(source);
- } else {
- LOG(FATAL) << "Trying to copy blobs of different sizes.";
- }
- }
- switch (Caffe::mode()) {
- case Caffe::GPU:
-
- if (copy_diff) {
-
-
- caffe_copy(count_, source.gpu_diff(),
- static_cast<Dtype*>(diff_->mutable_gpu_data()));
- } else {
- caffe_copy(count_, source.gpu_data(),
- static_cast<Dtype*>(data_->mutable_gpu_data()));
- }
- break;
- case Caffe::CPU:
-
- if (copy_diff) {
- caffe_copy(count_, source.cpu_diff(),
- static_cast<Dtype*>(diff_->mutable_cpu_data()));
- } else {
- caffe_copy(count_, source.cpu_data(),
- static_cast<Dtype*>(data_->mutable_cpu_data()));
- }
- break;
- default:
- LOG(FATAL) << "Unknown caffe mode.";
- }
- }
-
- template <typename Dtype>
- void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
-
- if (reshape) {
- vector<int> shape;
- if (proto.has_num() || proto.has_channels() ||
- proto.has_height() || proto.has_width()) {
-
-
-
- shape.resize(4);
- shape[0] = proto.num();
- shape[1] = proto.channels();
- shape[2] = proto.height();
- shape[3] = proto.width();
- } else {
- shape.resize(proto.shape().dim_size());
- for (int i = 0; i < proto.shape().dim_size(); ++i) {
- shape[i] = proto.shape().dim(i);
- }
- }
- Reshape(shape);
- } else {
- CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
- }
-
- Dtype* data_vec = mutable_cpu_data();
- if (proto.double_data_size() > 0) {
- CHECK_EQ(count_, proto.double_data_size());
- for (int i = 0; i < count_; ++i) {
- data_vec[i] = proto.double_data(i);
- }
- } else {
- CHECK_EQ(count_, proto.data_size());
- for (int i = 0; i < count_; ++i) {
- data_vec[i] = proto.data(i);
- }
- }
-
- if (proto.double_diff_size() > 0) {
- CHECK_EQ(count_, proto.double_diff_size());
- Dtype* diff_vec = mutable_cpu_diff();
- for (int i = 0; i < count_; ++i) {
- diff_vec[i] = proto.double_diff(i);
- }
- } else if (proto.diff_size() > 0) {
- CHECK_EQ(count_, proto.diff_size());
- Dtype* diff_vec = mutable_cpu_diff();
- for (int i = 0; i < count_; ++i) {
- diff_vec[i] = proto.diff(i);
- }
- }
- }
-
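- // ToProto is specialized for double and float so that the values are written
- // to the proto's double_data/double_diff or data/diff fields respectively.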
- template <>
- void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
- proto->clear_shape();
-
- for (int i = 0; i < shape_.size(); ++i) {
- proto->mutable_shape()->add_dim(shape_[i]);
- }
-
- proto->clear_double_data();
- proto->clear_double_diff();
-
- const double* data_vec = cpu_data();
- for (int i = 0; i < count_; ++i) {
- proto->add_double_data(data_vec[i]);
- }
-
- if (write_diff) {
- const double* diff_vec = cpu_diff();
- for (int i = 0; i < count_; ++i) {
- proto->add_double_diff(diff_vec[i]);
- }
- }
- }
-
- template <>
- void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
- proto->clear_shape();
- for (int i = 0; i < shape_.size(); ++i) {
- proto->mutable_shape()->add_dim(shape_[i]);
- }
- proto->clear_data();
- proto->clear_diff();
- const float* data_vec = cpu_data();
- for (int i = 0; i < count_; ++i) {
- proto->add_data(data_vec[i]);
- }
- if (write_diff) {
- const float* diff_vec = cpu_diff();
- for (int i = 0; i < count_; ++i) {
- proto->add_diff(diff_vec[i]);
- }
- }
- }
-
- INSTANTIATE_CLASS(Blob);
- template class Blob<int>;
- template class Blob<unsigned int>;
-
- }  // namespace caffe
Summary:
As the old saying goes: read the fxxx source code.
Also browse Caffe's GitHub issues often.
References:
[1] Another Caffe source-code walkthrough, also well written:
http://www.cnblogs.com/louyihang-loves-baiyan/
http://www.cnblogs.com/louyihang-loves-baiyan/p/5149628.html
[2] The meaning of the commonly used BLAS routines:
http://www.cnblogs.com/huashiyiqike/p/3886670.html
http://www.netlib.org/blas/
[3] protobuf reference:
http://www.w2bc.com/Article/34963