Client-side communication implemented with the vsomeip tcp_client_endpoint_impl code

client_endpoint_test.h

// Note: the include targets were stripped in transcription; the headers below are
// reconstructed from what the code actually uses (cmdline.h is the third-party
// single-header command-line parser used in main()).
#include <boost/asio.hpp>
#include <iostream>
#include <cstdio>
#include <cstring>
#include <cstdint>
#include <chrono>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <vector>
#include <unistd.h>
#include "cmdline.h"


const uint16_t ILLEGAL_PORT = 0xFFFF;
#define VSOMEIP_DEFAULT_CONNECTING_TIMEOUT      500  // ms
#define VSOMEIP_DEFAULT_CONNECT_TIMEOUT         100  // ms
#define VSOMEIP_MAX_CONNECT_TIMEOUT             1600 // ms
const std::uint32_t MAX_RECONNECTS_UNLIMITED = (std::numeric_limits<std::uint32_t>::max)();
const std::uint32_t QUEUE_SIZE_UNLIMITED = (std::numeric_limits<std::uint32_t>::max)();
const std::uint32_t MESSAGE_SIZE_UNLIMITED = (std::numeric_limits<std::uint32_t>::max)();
#define DEFAULT_NANOSECONDS_MAX std::chrono::nanoseconds::max()
#define VSOMEIP_DEFAULT_NPDU_DEBOUNCING_NANO         (20 * 1000 * 1000)  // 20 ms
#define VSOMEIP_DEFAULT_NPDU_RETENTION_MAXIMUM_NANO  (50 * 1000 * 1000)  // 50 ms
#define VSOMEIP_DEFAULT_BUFFER_SHRINK_THRESHOLD 5
constexpr std::uint32_t VSOMEIP_SOMEIP_HEADER_SIZE       = 8;
constexpr std::size_t VSOMEIP_RETURN_CODE_POS            = 15;
#define VSOMEIP_MAX_TCP_SENT_WAIT_TIME          10000
#define VSOMEIP_BYTES_TO_LONG(x0, x1, x2, x3) (uint32_t((x0) << 24 | (x1) << 16 | (x2) << 8 | (x3)))


enum class cei_state_e : std::uint8_t {
    CLOSED,
    CONNECTING,
    CONNECTED,
    ESTABLISHED
};

std::map<cei_state_e, std::string> state_string = {
    {cei_state_e::CLOSED, "CLOSED"},
    {cei_state_e::CONNECTING, "CONNECTING"},
    {cei_state_e::CONNECTED, "CONNECTED"},
    {cei_state_e::ESTABLISHED, "ESTABLISHED"}
};

enum class cms_ret_e : uint8_t {
    MSG_TOO_BIG,
    MSG_OK,
    MSG_WAS_SPLIT
};

typedef uint8_t byte_t;
typedef std::vector<byte_t> message_buffer_t;
typedef std::shared_ptr<message_buffer_t> message_buffer_ptr_t;
typedef uint16_t service_t;
typedef uint16_t method_t;
typedef uint32_t length_t;

struct train {
    train() : buffer_(std::make_shared<message_buffer_t>()),
    	      minimal_debounce_time_(DEFAULT_NANOSECONDS_MAX),
    	      minimal_max_retention_time_(DEFAULT_NANOSECONDS_MAX),
    	      departure_(std::chrono::steady_clock::now() + std::chrono::hours(6)) {
    }
      
    void reset() {
        buffer_ = std::make_shared<message_buffer_t>();
        minimal_debounce_time_ = DEFAULT_NANOSECONDS_MAX;
    	minimal_max_retention_time_ = DEFAULT_NANOSECONDS_MAX;
        departure_ = std::chrono::steady_clock::now() + std::chrono::hours(6);
    }
    
    message_buffer_ptr_t buffer_;
    std::set<std::pair<service_t, method_t> > passengers_;
    std::chrono::nanoseconds minimal_debounce_time_;        // minimum debounce time among all passengers
    std::chrono::nanoseconds minimal_max_retention_time_;   // minimum maximum-retention time among all passengers

    std::chrono::steady_clock::time_point departure_;   // departure (i.e. send) time of this train
};
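
The interplay of debounce and retention time above can be illustrated with a small stand-alone sketch (not part of the test program; the 50 ms / 30 ms values are made up for the example): the first passenger of an empty train sets the departure to now + its retention time, and every later passenger may only pull the departure forward, never push it back.

#include <chrono>
#include <iostream>

int main() {
    using namespace std::chrono;
    auto now = steady_clock::now();

    // first passenger of an empty train: depart at the latest possible moment, now + retention
    nanoseconds retention1 = milliseconds(50);
    auto departure = now + retention1;

    // a second passenger with a smaller retention time (30 ms) pulls the departure forward
    nanoseconds retention2 = milliseconds(30);
    if (now + retention2 < departure) {
        departure = now + retention2;
    }

    std::cout << "train departs in "
              << duration_cast<milliseconds>(departure - now).count()
              << " ms" << std::endl;   // prints 30
    return 0;
}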

// members of client_endpoint_impl
// socket state
cei_state_e state_ = cei_state_e::CLOSED;
// server IP address
std::string ip_;
// port the server is listening on
int port_;
// io context
boost::asio::io_context io_{};
// socket
boost::asio::ip::tcp::socket socket_(io_);
// this mutex must be held while operating on socket_
std::mutex socket_mutex_;
// io thread; io_context runs on it (whichever thread calls io_context::run is the thread
// that pulls handlers out of the io_context and runs them, much like a handler/thread pair)
std::thread io_thread_;
// tcp server information (ip + port)
boost::asio::ip::tcp::endpoint remote_;
// serializes the handlers queued on the io_context (in order), even when run() is called from several threads
boost::asio::io_context::strand strand_(io_);
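
A minimal stand-alone sketch of what the strand guarantees (independent of this test): handlers that go through the same strand are never executed concurrently, even when io_context::run is called from several threads, so the shared counter below needs no mutex.

#include <boost/asio.hpp>
#include <iostream>
#include <thread>

int main() {
    boost::asio::io_context io;
    boost::asio::io_context::strand strand(io);
    int counter = 0;   // only touched from strand handlers, so no mutex is needed

    for (int i = 0; i < 1000; ++i) {
        strand.post([&counter]() { ++counter; });   // all increments are serialized by the strand
    }

    std::thread t1([&io]() { io.run(); });
    std::thread t2([&io]() { io.run(); });
    t1.join();
    t2.join();

    std::cout << "counter = " << counter << std::endl;   // always 1000
    return 0;
}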

// sender thread, only used by this test program
std::thread send_thread_;

// guards the connect operation: if this timer expires, the async_connect has timed out
boost::asio::steady_timer connecting_timer_(io_);
// this mutex must be held while operating on connecting_timer_
std::mutex connecting_timer_mutex_;
// connecting_timer_ timeout
uint32_t connecting_timeout_ = VSOMEIP_DEFAULT_CONNECTING_TIMEOUT;

// drives the reconnect logic: when this timer expires, a reconnect attempt is made
boost::asio::steady_timer connect_timer_(io_);
// this mutex must be held while operating on connect_timer_
std::mutex connect_timer_mutex_;
// connect_timer_ timeout (doubled on every reconnect attempt)
uint32_t connect_timeout_ = VSOMEIP_DEFAULT_CONNECT_TIMEOUT;
// number of connect attempts
uint32_t reconnect_counter_ = 0;
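
As a quick sanity check of the back-off described above: starting from VSOMEIP_DEFAULT_CONNECT_TIMEOUT (100 ms) and doubling on every failed attempt gives 100, 200, 400, 800, 1600 ms; the otherwise unused VSOMEIP_MAX_CONNECT_TIMEOUT (1600 ms) is presumably meant as the cap, which is how upstream vsomeip behaves. A tiny stand-alone illustration:

#include <cstdint>
#include <iostream>

int main() {
    uint32_t timeout = 100;             // VSOMEIP_DEFAULT_CONNECT_TIMEOUT, in ms
    const uint32_t max_timeout = 1600;  // VSOMEIP_MAX_CONNECT_TIMEOUT, in ms
    for (int attempt = 1; attempt <= 6; ++attempt) {
        std::cout << "attempt " << attempt << ": wait " << timeout << " ms" << std::endl;
        if (timeout < max_timeout) {
            timeout <<= 1;              // 100 -> 200 -> 400 -> 800 -> 1600, then stays capped
        }
    }
    return 0;
}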


// set to true in stop()/wait_until_sent() to block the send path
bool sending_blocked_ = false;
// whether a connection to the server has ever succeeded; false: connected at least once, true: never connected
bool was_not_connected_ = true;

// false when queue_ holds no message_buffer (nothing left to send), true while sending is in progress
bool is_sending_;

// send data
std::shared_ptr<train> train_ = std::make_shared<train>();
std::map<std::chrono::steady_clock::time_point, 
         std::deque<std::shared_ptr<train> > > dispatched_trains_;  // key: departure time, value: queue of trains waiting to depart
boost::asio::steady_timer dispatch_timer_(io_);
std::chrono::steady_clock::time_point last_departure_;  // time point at which the last send completed
bool has_last_departure_;
std::deque<std::pair<message_buffer_ptr_t, uint32_t> > queue_;  // deque of message_buffers waiting to be sent; the uint32_t is 0 for non-TP messages, for TP messages it carries the configured inter-segment gap
// maximum size of a single outgoing message
std::uint32_t max_message_size_(MESSAGE_SIZE_UNLIMITED);  // TCP needs no application-level segmentation: send as much as there is, fragmentation is handled by the IP layer
// total number of bytes currently queued in queue_
std::size_t queue_size_;
std::uint32_t queue_limit_ = QUEUE_SIZE_UNLIMITED;
// this mutex must be held before accessing elements of queue_
std::recursive_mutex mutex_;

// receive data
const uint32_t recv_buffer_size_initial_ = 200;   // initial size of the recv_buffer_ receive buffer; originally set to VSOMEIP_SOMEIP_HEADER_SIZE (8 bytes)
message_buffer_ptr_t recv_buffer_(std::make_shared<message_buffer_t>(recv_buffer_size_initial_, 0));
uint32_t shrink_count_ = 0;
const uint32_t buffer_shrink_threshold_ = VSOMEIP_DEFAULT_BUFFER_SHRINK_THRESHOLD;  // shrink-count threshold (5)
boost::asio::steady_timer sent_timer_(io_);    // timer used to wait for an ongoing send to finish (typically before restarting the socket)

// member of application_impl
// work object that keeps the io context alive (blocked) when there is nothing to do
boost::asio::io_context::work work_(io_);

std::allocator<void> alloc;

// default debounce time for REQUEST messages (nanoseconds)
std::chrono::nanoseconds npdu_default_debounce_requ_(VSOMEIP_DEFAULT_NPDU_DEBOUNCING_NANO);
// default maximum retention time for REQUEST messages (nanoseconds)
std::chrono::nanoseconds npdu_default_max_retention_requ_(VSOMEIP_DEFAULT_NPDU_RETENTION_MAXIMUM_NANO);

// function declarations: begin
// tcp_client_endpoint_impl member functions
void start();
void restart(bool _force);
void Connect();   // originally named connect(); that collides with connect() from socket.h, so the argument passed to strand::dispatch no longer matches (a void(void) callable is expected)
void receive();
void receive(message_buffer_ptr_t  _recv_buffer, std::size_t _recv_buffer_size, std::size_t _missing_capacity);
void handle_recv_buffer_exception(const std::exception& ex, const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size);
void receive_cbk(boost::system::error_code const & _error_code, std::size_t _bytes,
                 const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size);
void calculate_shrink_count(const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size);
bool send(const uint8_t *_data, uint32_t _size);
void get_configured_times_from_endpoint(
            service_t _service, method_t _method,
            std::chrono::nanoseconds *_debouncing,
            std::chrono::nanoseconds *_maximum_retention);
void send_queued(std::pair<message_buffer_ptr_t, uint32_t> &_entry);
std::size_t write_completion_condition(const boost::system::error_code& _ec, std::size_t _bytes_transferred,
				       std::size_t _bytes_to_send, const std::chrono::steady_clock::time_point _start);
void on_message(const byte_t *_data, 
            length_t _size,
            // endpoint *_receiver, 
            bool _is_multicast,
            // client_t _bound_client, 
            // const vsomeip_sec_client_t *_sec_client,
            const boost::asio::ip::address &_remote_address,
            std::uint16_t _remote_port);    // message handler
void wait_until_sent(const boost::system::error_code &_error);   // if a receive error requires restarting the socket, wait for the ongoing send to finish first

// client_endpoint_impl member functions
void shutdown_and_close_socket_unlocked(bool _recreate_socket);
void shutdown_and_close_socket(bool _recreate_socket);
void wait_connect_cbk(boost::system::error_code const & _error);
void start_connect_timer();
void connect_cbk(boost::system::error_code const &_error);
void cancel_and_connect_cbk(boost::system::error_code const &_error);
void wait_connecting_cbk(boost::system::error_code const &_error);
void start_connecting_timer();
void set_established(bool _established);
void set_connected(bool _connected);
bool check_queue_limit(const uint8_t *_data, std::uint32_t _size);
cms_ret_e check_message_size(const std::uint8_t * const _data, std::uint32_t _size);
void cancel_dispatch_timer();
void start_dispatch_timer(const std::chrono::steady_clock::time_point &_now);
void schedule_train();
void update_last_departure();  // updates the last departure time; called from tcp_client_endpoint_impl::send_cbk
void queue_train(const std::shared_ptr<train> &_train);
std::pair<message_buffer_ptr_t, uint32_t> get_front();
void send_cbk(const boost::system::error_code& _error, const std::size_t _bytes, const message_buffer_ptr_t& _send_msg);
void flush_cbk(boost::system::error_code const & _error);
void flush();

// callbacks implemented in endpoint_manager_impl
void on_connect();
void on_disconnect();
// function declarations: end

client_endpoint_test.cpp

#include "client_endpoint_test.h"

service_t service_id = 0;
method_t method_id = 0;

uint64_t get_message_size(const byte_t *_data, size_t _size) {
    uint64_t its_size(0);
    if (VSOMEIP_SOMEIP_HEADER_SIZE <= _size) {
        its_size = VSOMEIP_SOMEIP_HEADER_SIZE
                + VSOMEIP_BYTES_TO_LONG(_data[4], _data[5], _data[6], _data[7]);
    }
    printf("%d, %d, %d, %d, %d, %d, %d, %d\n", _data[0], _data[1], _data[2], _data[3], _data[4], _data[5], _data[6], _data[7]);
    return its_size;
}
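
get_message_size relies on the SOME/IP header layout: bytes 0-3 hold the Service ID and Method ID, bytes 4-7 hold the Length field, and the Length counts everything after it (Request ID, version/type/return-code bytes and payload), so the total wire size is 8 + Length. A small stand-alone illustration with a hand-built, payload-less header:

#include <cstdint>
#include <cstdio>

int main() {
    // SOME/IP header: [service(2)][method(2)][length(4)][client(2)][session(2)]
    //                 [protocol version(1)][interface version(1)][message type(1)][return code(1)]
    uint8_t msg[16] = {
        0x12, 0x34,              // service id
        0x00, 0x01,              // method id
        0x00, 0x00, 0x00, 0x08,  // length = 8: the remaining 8 header bytes, no payload
        0x00, 0x2a,              // client id
        0x00, 0x01,              // session id
        0x01, 0x00, 0x00, 0x00   // protocol/interface version, message type, return code
    };
    uint32_t length = (uint32_t(msg[4]) << 24) | (uint32_t(msg[5]) << 16)
                    | (uint32_t(msg[6]) << 8)  |  uint32_t(msg[7]);
    printf("total message size = %u\n", 8 + length);   // prints 16, as get_message_size would
    return 0;
}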

// implemented in client_endpoint_impl
// closes the current socket and, depending on the argument, creates a new one (the open() happens later in Connect())
void shutdown_and_close_socket_unlocked(bool _recreate_socket) {
    if (socket_.is_open()) {
        boost::system::error_code its_error;
        socket_.shutdown(boost::asio::ip::tcp::socket::shutdown_both, its_error);
        socket_.close();
    }
    if (_recreate_socket) {
        socket_ = boost::asio::ip::tcp::socket(io_);  // create a fresh socket to replace the closed one
    }
}

// implemented in client_endpoint_impl
void shutdown_and_close_socket(bool _recreate_socket) {
    std::lock_guard<std::mutex> lock(socket_mutex_);
    shutdown_and_close_socket_unlocked(_recreate_socket);
}

// implemented in client_endpoint_impl
// executed when connect_timer_ expires, to trigger a reconnect
void wait_connect_cbk(boost::system::error_code const & _error) {
    if (!_error && !sending_blocked_) {
        strand_.dispatch(Connect);
    }
}

// implemented in client_endpoint_impl
// starts the reconnect timer
void start_connect_timer() {
    std::lock_guard<std::mutex> its_lock(connect_timer_mutex_);
    connect_timer_.expires_from_now(std::chrono::milliseconds(connect_timeout_));
    if (connect_timeout_ < VSOMEIP_MAX_CONNECT_TIMEOUT) {
        connect_timeout_ = (connect_timeout_ << 1);   // exponential back-off; capped at VSOMEIP_MAX_CONNECT_TIMEOUT (the constant was otherwise unused)
    }
    connect_timer_.async_wait(std::bind(wait_connect_cbk, std::placeholders::_1));
}

// implemented in client_endpoint_impl
// whether the connect succeeded or timed out, the result always ends up being handled in connect_cbk
void connect_cbk(boost::system::error_code const &_error) {
    std::cout << "cei::connect_cbk connect to server has result returned" << std::endl;
    if (_error == boost::asio::error::operation_aborted) {
        // operation_aborted means the user closed the socket before the connect returned a result
        std::cout << "connect fail [" << boost::system::system_error(_error).what() << "]" << std::endl;
        shutdown_and_close_socket(false);
        return;
    } else if (sending_blocked_) {
        std::cout << "connect fail because of client endpoint is stopping" << std::endl;
        shutdown_and_close_socket(false);
        return;
    }
    if (_error && _error != boost::asio::error::already_connected) {
        std::cout << "connect to server failed, error: " << boost::system::system_error(_error).what() << std::endl;
        shutdown_and_close_socket(true);
        
        if (state_ != cei_state_e::ESTABLISHED) {
            state_ = cei_state_e::CLOSED;
            on_disconnect();
        }
        if ((++reconnect_counter_) <= MAX_RECONNECTS_UNLIMITED) {
            // below the maximum attempt count: schedule a reconnect (after a back-off delay)
            std::cout << "start connect_timer to reconnect" << std::endl;
            start_connect_timer();
        }
    } else {
        // connect succeeded, the connection is established
        std::cout << "cei::connect_cbk connect to server success" << std::endl;
        // first cancel the reconnect timer
        {
            std::lock_guard<std::mutex> lock(connect_timer_mutex_);
            connect_timer_.cancel();
        }
        // reset the reconnect state; connect_timer_ will need these values again if the socket later drops
        connect_timeout_ = VSOMEIP_DEFAULT_CONNECT_TIMEOUT;
        reconnect_counter_ = 0;
        if (was_not_connected_) {
            was_not_connected_ = false;
            {
                std::lock_guard<std::recursive_mutex> lock(mutex_);
                // take pending data out of queue_ and send it
                auto its_entry = get_front();
                if (its_entry.first) {   // message_buffer_ptr is not null
                    // is_sending_ = true;   // lcb modify (commented out; now set inside send_queued)
                    strand_.dispatch(std::bind(send_queued, its_entry));
                }
            }
        }
        if (state_ != cei_state_e::ESTABLISHED) {
            on_connect();
        }
        // start receiving
        receive();
    }
}

// implemented in client_endpoint_impl
// "cancel" refers to cancelling the connecting_timer_
// "connect_cbk" refers to calling the connect_cbk function
// being called here means the connect attempt has produced some result (success, abort or another connect error); connect_cbk decides what to do next
void cancel_and_connect_cbk(boost::system::error_code const &_error) {
    std::size_t operations_cancelled;
    {
        std::lock_guard<std::mutex> lock(connecting_timer_mutex_);
        // an async_wait hangs an asynchronous operation on the timer;
        // cancel() aborts those operations and returns how many of them were cancelled
        operations_cancelled = connecting_timer_.cancel();
    }
    if (operations_cancelled != 0) {
        // i.e. the wait_connecting_cbk registered via async_wait will no longer fire as a timeout
        connect_cbk(_error);
    }
}
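
The race handled here (async_connect versus the connecting timer, where whichever completes first cancels the other) hinges on steady_timer::cancel() reporting how many pending waits it aborted. A minimal stand-alone sketch of that property:

#include <boost/asio.hpp>
#include <iostream>

int main() {
    boost::asio::io_context io;
    boost::asio::steady_timer timer(io);

    timer.expires_after(std::chrono::seconds(5));
    timer.async_wait([](const boost::system::error_code& ec) {
        // invoked with operation_aborted because the wait is cancelled below
        std::cout << "timer handler: " << ec.message() << std::endl;
    });

    // cancel() returns the number of asynchronous waits it aborted (1 here);
    // cancel_and_connect_cbk uses exactly this value to detect that the
    // connecting timeout had not fired yet
    std::size_t cancelled = timer.cancel();
    std::cout << "cancelled waits: " << cancelled << std::endl;

    io.run();
    return 0;
}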

// implemented in client_endpoint_impl
// being called here means the initial connect attempt after socket creation has timed out
void wait_connecting_cbk(boost::system::error_code const &_error) {
    // !_error means connecting_timer_ expired normally rather than being cancelled
    // 0 ---> boost::system::errc::success
    //std::cout << "wait_connecting_cbk (" << boost::system::system_error(_error).what() << ")" << std::endl;
    if (!_error && !sending_blocked_) {
        std::cout << "wait connecting operation timeout" << std::endl;
        connect_cbk(boost::asio::error::timed_out);  // timeout handling: trigger a reconnect
    }
}

// implemented in client_endpoint_impl
void start_connecting_timer() {
    std::lock_guard<std::mutex> its_lock(connecting_timer_mutex_);
    // set the timeout
    connecting_timer_.expires_from_now(std::chrono::milliseconds(connecting_timeout_));
    // register the handler that runs on expiry or cancellation
    connecting_timer_.async_wait(std::bind(wait_connecting_cbk, std::placeholders::_1));
}

// implemented in tcp_client_endpoint_impl
void Connect() {
    if (!socket_.is_open()) {
	boost::system::error_code its_error;
        std::cout << "open tcp client socket" << std::endl;
        socket_.open(remote_.protocol(), its_error);
        if (!its_error || its_error == boost::asio::error::already_open) {
            // set socket options (NO_DELAY, SO_REUSEADDR, SO_KEEPALIVE, SO_LINGER)
            socket_.set_option(boost::asio::ip::tcp::no_delay(true), its_error);
            if (its_error) {
            	std::cout << "couldn't disable Nagle algorithm" << std::endl;
            	return;
            }
            socket_.set_option(boost::asio::socket_base::keep_alive(true), its_error);
            if (its_error) {
                std::cout << "couldn't enable keep alive" << std::endl;
            	return;
            }
            socket_.set_option(boost::asio::socket_base::reuse_address(true), its_error);
            if (its_error) {
                std::cout << "couldn't enable SO_REUSEADDR" << std::endl;
            	return;
            }
            // SO_LINGER controls what happens to unsent data on close(); note that a linger timeout of 0 actually discards pending data and resets the connection
            socket_.set_option(boost::asio::socket_base::linger(true, 0), its_error);
            if (its_error) {
                std::cout << "couldn't enable SO_LINGER" << std::endl;
            	return;
            }
            // unlike the original vsomeip, no SO_BINDTODEVICE is used here to pin the socket's traffic to a specific network interface (vsomeip may do that depending on configuration)
            
            if (port_ < ILLEGAL_PORT) {
                std::cout << "async connect to remote [" << ip_ << ":" << port_ << "]" << std::endl;
                state_ = cei_state_e::CONNECTING;  // update socket state
                start_connecting_timer();          // start the connecting timeout timer
                // asynchronously connect to the remote tcp server; cancel_and_connect_cbk receives the connect result
                socket_.async_connect(remote_, strand_.wrap(std::bind(cancel_and_connect_cbk, std::placeholders::_1)));
            }
        }
    }
}

// implemented in tcp_client_endpoint_impl
void receive() {
    message_buffer_ptr_t its_recv_buffer;
    {
        std::lock_guard<std::mutex> lock(socket_mutex_);
        its_recv_buffer = recv_buffer_;    // restart() recreates this buffer
    }
    // capture by value: the lambda runs asynchronously, after this function has already returned
    strand_.dispatch([its_recv_buffer]() {
        receive(its_recv_buffer, 0, 0);    // this is the entry point of the receive chain, so the last two arguments are 0
    });
    
}

// implemented in tcp_client_endpoint_impl
/**
 * _recv_buffer: vector used as the receive buffer
 * _recv_buffer_size: number of bytes currently held in _recv_buffer (already collected by previous async reads)
 * _missing_capacity: number of bytes by which _recv_buffer needs to grow
 */
void receive(message_buffer_ptr_t  _recv_buffer, std::size_t _recv_buffer_size, std::size_t _missing_capacity) {
    std::cout << "tce::receive recv_buffer_size: " << _recv_buffer_size << ", missing_capacity: " << _missing_capacity << std::endl;
    std::lock_guard<std::mutex> lock(socket_mutex_);
    if (socket_.is_open()) {
        const std::size_t its_capacity(_recv_buffer->capacity());
        size_t buffer_size = its_capacity - _recv_buffer_size;   // number of bytes to receive in this round
        try {
            if (_missing_capacity) {   // _missing_capacity > 0 means the capacity is insufficient and the buffer must grow
                if (_missing_capacity > MESSAGE_SIZE_UNLIMITED) {
                    std::cout << "tce:receive Missing receive buffer capacity exceed allowed maximum" << std::endl;
                    return;
                }
                const std::size_t its_required_size(_recv_buffer_size + _missing_capacity);  // new capacity = bytes already buffered + _missing_capacity
                if (its_capacity < its_required_size) {
                    _recv_buffer->reserve(its_required_size);      // reallocate to the new capacity
                    _recv_buffer->resize(its_required_size, 0x00);
                    if (_recv_buffer->size() > 1048576U) {
                        std::cout << "tce:receive recv_buffer size is:" << _recv_buffer->size() << std::endl;
                    }
                }
                buffer_size = _missing_capacity;
            } else if (buffer_shrink_threshold_
                    && shrink_count_ > buffer_shrink_threshold_  // only shrink once the counter exceeds the threshold; the buffer goes back to its initial size
                    && (buffer_size == 0)) {      // the buffer is full (capacity - bytes already received == 0)
                _recv_buffer->resize(recv_buffer_size_initial_, 0x00);   // shrink (using the parameter, consistent with the rest of the function)
                _recv_buffer->shrink_to_fit();
                buffer_size = recv_buffer_size_initial_;
                shrink_count_ = 0;
            }
        } catch(const std::exception& ex) {
            // reconnect and resize buffer
            handle_recv_buffer_exception(ex, _recv_buffer, _recv_buffer_size);
            return;
        }
        // this round's data is stored into the buffer_size bytes starting right after the last byte already received
        socket_.async_receive(boost::asio::buffer(&((*_recv_buffer)[_recv_buffer_size]), buffer_size),
                              strand_.wrap(std::bind(receive_cbk,       // callback
                                                  std::placeholders::_1,    // error code (placeholder)
                                                  std::placeholders::_2,    // number of bytes read (placeholder)
                                                  _recv_buffer,             // buffer holding all data read so far
                                                  _recv_buffer_size))       // number of bytes already read
                           );
    }
}

// implemented in tcp_client_endpoint_impl
void handle_recv_buffer_exception(const std::exception& ex, const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size) {
    // left empty in this test; the original vsomeip implementation logs the exception,
    // resets the receive buffer and restarts the connection
}

// implemented in tcp_client_endpoint_impl
void receive_cbk(boost::system::error_code const & _error_code, std::size_t _bytes,
                 const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size) {
    if (_error_code == boost::asio::error::operation_aborted) {
        // user cancel current async receive operation before data received, return directly
        std::cout << "tce::receive_cbk async receive fail: [" << boost::system::system_error(_error_code).what() << "]" << std::endl;
        return;
    }
    std::cout << "tce::receive_cbk " << _bytes << " bytes read, recv_buffer_size is " << _recv_buffer_size << std::endl;
    std::unique_lock<std::mutex> its_lock(socket_mutex_);
    {
        // magic cookie handling is omitted here; to be added back once that part of the code is understood
        uint32_t its_missing_capacity = 0;
        // async_receive return success and read some bytes from socket buffer
        if (!_error_code && _bytes > 0) {
            if ((_recv_buffer_size + _bytes) > _recv_buffer->size()) {
                // in theory this cannot exceed the buffer capacity, because the byte count passed to async_receive was derived from that capacity
                std::cout << "tce::receive_cbk receive buffer overflow, abort" << std::endl;
                return;
            }
            _recv_buffer_size += _bytes;  // update the number of bytes collected in the receive buffer

            size_t its_iteration_gap = 0;
            bool has_full_message(false);
            do {
                uint64_t message_size = get_message_size(&((*_recv_buffer)[its_iteration_gap]), _recv_buffer_size);
                std::cout << "tce::receive_cbk someip message size is " << message_size << ", buffer capacity is " << _recv_buffer->capacity() <<  std::endl;
                if (message_size > MESSAGE_SIZE_UNLIMITED) {
                    std::cout << "invalid message size :" << message_size << ", overflowed" << std::endl;
                    return;
                }
                uint32_t current_message_size = static_cast<uint32_t>(message_size);
                has_full_message = (current_message_size > VSOMEIP_RETURN_CODE_POS)   // the message length is plausible (larger than the offset of the RETURN_CODE field)
                                && (current_message_size <= _recv_buffer_size);        // and the bytes collected so far cover the whole message
                if (has_full_message) {
                    its_lock.unlock();    // release the socket lock so that, at least while the message is being processed, other threads can operate on the socket (read/write/close...)
                    // process the message
                    on_message(&((*_recv_buffer)[its_iteration_gap]),   // start of the message
                               current_message_size,    // message length
                               false,       // not multicast
                               remote_.address(),   // server address
                               remote_.port()     // server listening port
                    );
                    its_lock.lock();      // re-acquire the socket lock
                    // update the shrink counter (used to decide whether recv_buffer_'s capacity should be shrunk once the counter reaches the threshold)
                    calculate_shrink_count(_recv_buffer, _recv_buffer_size);
                    _recv_buffer_size -= current_message_size;   // subtract the current message from the total collected so far
                    its_iteration_gap += current_message_size;   // move the read pointer to the start of the next message
                    its_missing_capacity = 0;
                }

                if (!has_full_message) {
                    if (_recv_buffer_size > VSOMEIP_RETURN_CODE_POS) {  // first check whether reading this message would run past the data already collected in the buffer
                        bool invalid_param_detected(false);
                        if (_recv_buffer->size() <= (its_iteration_gap + VSOMEIP_RETURN_CODE_POS)) {
                            // the start of the next SOME/IP message already lies outside the data currently held in the buffer;
                            // e.g. the buffer holds 200 bytes in total, but after finishing the first message
                            // the next message would start at its_iteration_gap plus VSOMEIP_RETURN_CODE_POS bytes,
                            // and that offset is beyond the 200 bytes actually stored
                            invalid_param_detected = true;
                            std::cout << "tce::receive_cbk trying to access invalid vector position. "
                                      << " Actual: " << recv_buffer_->size()
                                      << " Current: " << current_message_size
                                      << " Bytes: " << _bytes
                                      << " Begin Pos: " << its_iteration_gap
                                      << " Is_Full_Message: " << (has_full_message ? "true" : "false") << std::endl;
                        }

                        if (invalid_param_detected) {
                            state_ = cei_state_e::CONNECTING;
                            shutdown_and_close_socket_unlocked(false);
                            its_lock.unlock();
                            wait_until_sent(boost::asio::error::operation_aborted);   // restart the socket once the send queue has drained
                            return;    // stop receiving
                        }
                    }
                    // _recv_buffer_size is the number of unprocessed bytes left in the buffer; if that is less than the length of the message being read, the buffer has to grow
                    if (current_message_size > _recv_buffer_size) {
                        its_missing_capacity = current_message_size - static_cast<uint32_t>(_recv_buffer_size);  // bytes still needed for the full message
                    } else if (VSOMEIP_SOMEIP_HEADER_SIZE > _recv_buffer_size) {  // not even a complete SOME/IP header is available yet
                        its_missing_capacity = VSOMEIP_SOMEIP_HEADER_SIZE - static_cast<uint32_t>(_recv_buffer_size);  // grow just enough to read the full SOME/IP header first
                    }

                }
                std::cout << "tce::receive_cbk loop message, _recv_buffer_size: " << _recv_buffer_size 
                          << ", has_full_message: " << (has_full_message ? "true" : "false") << std::endl;
            } while(has_full_message && _recv_buffer_size);   // a single receive may deliver several messages, so _recv_buffer can hold more than one
            if (its_iteration_gap) {  // a trailing, incomplete SOME/IP message is still left in the buffer
                memcpy(&((*_recv_buffer)[0]), &((*_recv_buffer)[its_iteration_gap]), _recv_buffer_size);  // move the leftover data to the front
                // after moving the partial message to the front, check whether the capacity is still insufficient
                if (its_missing_capacity && (its_missing_capacity <= (_recv_buffer->capacity() - _recv_buffer_size))) {
                    // enough room after the move ((capacity - bytes of the partial message already read) covers the previously computed shortfall)
                    its_missing_capacity = 0;
                }
            }
            its_lock.unlock();   // release the socket lock
            // keep reading; capture the buffer by value because the lambda runs after this callback has returned
            strand_.dispatch([_recv_buffer, _recv_buffer_size, its_missing_capacity]() {
                receive(_recv_buffer, _recv_buffer_size, its_missing_capacity);
            });
        } else {    // error handling
            std::cout << "tce::receive_cbk error (" << boost::system::system_error(_error_code).what() << ")" << std::endl;
            if (boost::asio::error::eof == _error_code    // connection closed (graceful)
             || boost::asio::error::timed_out == _error_code   // timeout
             || boost::asio::error::bad_descriptor == _error_code   // bad socket file descriptor
             || boost::asio::error::connection_reset == _error_code) {  // connection closed abnormally (e.g. the peer process was killed)
                if (state_ == cei_state_e::CONNECTING) {
                    // the socket is already being restarted
                    std::cout << "tce::receive_cbk already restarting socket" << std::endl;
                } else {
                    state_ = cei_state_e::CONNECTING;
                    shutdown_and_close_socket(false);
                    its_lock.unlock();
                    wait_until_sent(boost::asio::error::operation_aborted);  // restart the socket
                }
            } else {
                its_lock.unlock();
                // keep reading (capture by value, see above)
                strand_.dispatch([_recv_buffer, _recv_buffer_size, its_missing_capacity]() {
                    receive(_recv_buffer, _recv_buffer_size, its_missing_capacity);
                });
            }
        }
    }
}

// implemented in application_impl
void on_message(const byte_t *_data, 
                length_t _size,
                bool _is_multicast,
                const boost::asio::ip::address &_remote_address,
                std::uint16_t _remote_port) {
    std::cout << "read complete someip message, size " << _size << std::endl;
}

// implemented in tcp_client_endpoint_impl
void calculate_shrink_count(const message_buffer_ptr_t& _recv_buffer, std::size_t _recv_buffer_size) {
    if (buffer_shrink_threshold_ > 0) {   // shrinking only happens when a threshold is configured
        if (_recv_buffer->capacity() != recv_buffer_size_initial_) {    // the buffer has grown dynamically during earlier receives
            if (_recv_buffer_size < _recv_buffer->capacity() >> 1) {   // the received data does not even fill half the capacity, so the buffer is a shrink candidate
                shrink_count_++;
            } else {
                shrink_count_ = 0;
            }
        }
    }
}
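
A worked example of this heuristic, using the constants above (initial size 200, threshold 5): suppose the buffer once grew to 4096 bytes and the following complete messages only need around 300 bytes each. Since 300 is below half the capacity (2048), shrink_count_ increases on every message; once it exceeds 5 and receive() finds the buffer full, the buffer is resized back to the 200-byte initial size. A single message larger than half the capacity resets the counter to 0.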

// implemented in tcp_client_endpoint_impl
void wait_until_sent(const boost::system::error_code &_error) {
    // mutex_ protects the send queue queue_; it is taken here to read the is_sending_ flag
    std::unique_lock<std::recursive_mutex> its_lock(mutex_);
    if (!is_sending_ || !_error) {  // !is_sending_: queue_ has drained, everything was sent; !_error: sent_timer_ expired (maximum wait time reached)
        its_lock.unlock();
        if (!_error) {  // wait timed out
            std::cout << "tce::" << __func__ << " : Maximum wait time for send operation" << std::endl;
        }
        on_disconnect();
        restart(true);
    } else {
        std::chrono::milliseconds its_timeout(VSOMEIP_MAX_TCP_SENT_WAIT_TIME);    // re-check in 10 seconds whether queue_ has finished sending
        boost::system::error_code ec;
        sent_timer_.expires_after(its_timeout);
        sent_timer_.async_wait(std::bind(wait_until_sent, std::placeholders::_1));
    }
    
}

void restart(bool _force) {
    auto restart_func = []() {
        // simplified for this test: only re-arm the reconnect timer; the original vsomeip
        // implementation additionally shuts down the socket and clears the send state here
        start_connect_timer();
    };
    strand_.dispatch(restart_func);
}

// implemented in client_endpoint_impl
void set_established(bool _established) {
    if (_established) {
        if (state_ != cei_state_e::CONNECTING) {
            std::lock_guard<std::mutex> lock(socket_mutex_);
            if (socket_.is_open()) {
                state_ = cei_state_e::ESTABLISHED;
            } else {
                state_ = cei_state_e::CLOSED;
            }
        }
    } else {
        state_ = cei_state_e::CLOSED;
    }
    std::cout << "socket state change to " << state_string[state_] << std::endl;
}

// implemented in client_endpoint_impl
void set_connected(bool _connected) {
    if (_connected) {
        std::lock_guard<std::mutex> lock(socket_mutex_);
        if (socket_.is_open()) {
            state_ = cei_state_e::CONNECTED;
        } else {
            state_ = cei_state_e::CLOSED;
        }
    } else {
        state_ = cei_state_e::CLOSED;
    }
}

// implemented in tcp_client_endpoint_impl
// starts the tcp_client_endpoint_impl: connects to the server and enables receive/send/reconnect handling
void start() {
    std::cout << "tcp client endpoint start" << std::endl;
    strand_.dispatch(Connect);
}

// implemented in client_endpoint_impl
// send a message
bool send(const uint8_t *_data, uint32_t _size) {
    std::lock_guard<std::recursive_mutex> lock(mutex_);
    std::cout << "cei::send data size is " << _size << std::endl;
    bool must_depart(false);
    auto its_now(std::chrono::steady_clock::now());
    if (sending_blocked_ || !check_queue_limit(_data, _size)) {
        return false;
    }
    // check whether the message has to be segmented; not needed for TCP
    switch (check_message_size(_data, _size)) {
        case cms_ret_e::MSG_WAS_SPLIT:
            std::cout << "cei::send check message size fail [MSG_WAS_SPLIT]" << std::endl;
            return true;  // when check_message_size returns MSG_WAS_SPLIT it has already called send_segments internally
        case cms_ret_e::MSG_TOO_BIG:
            std::cout << "cei::send check message size fail [MSG_TOO_BIG]" << std::endl;
            return false;
        case cms_ret_e::MSG_OK:
        default:
            break;
    }
    // Cancel dispatch timer
    cancel_dispatch_timer();

    // Check if the passenger enters an empty train
    std::chrono::nanoseconds its_debouncing(0), its_retention(0);
    get_configured_times_from_endpoint(service_id, method_id, &its_debouncing, &its_retention);
    std::cout << "cei::send debouncing: " << (its_debouncing.count() / 1000000) << "ms, "
              << "retention: " << (its_retention.count() / 1000000) << "ms" << std::endl;
    const std::pair<service_t, method_t> its_identifier = std::make_pair(service_id, method_id);
    if (train_->passengers_.empty()) {
        train_->departure_ = its_now + its_retention;   // latest possible
    } else {
        // Check whether the current train already contains the message
        if (train_->passengers_.end() != train_->passengers_.find(its_identifier)) {
            must_depart = true;
        } else {
            // Check debouncing time
            if (its_debouncing > train_->minimal_max_retention_time_) {
                // train's latest departure would already undershot new passengers's debounce time
                must_depart = true;
            } else {
                // Check Maximum retention time
                if (its_retention < train_->minimal_debounce_time_) {
                    // train's earliest departure would already exceed the new passenger's retention time
                    must_depart = true;
                } else {
                    if (its_now + its_retention < train_->departure_) {
                        // update train's departure
                        train_->departure_ = its_now + its_retention;
                    }
                }
            }
        }
    }
    std::cout << "cei::send must_depart is " << (must_depart ? "true" : "false") << std::endl;
    // if necessary, send current buffer and create a new one
    if (must_depart) {
        // check if debounce time would be undershot
        schedule_train();
        
        train_ = std::make_shared<train>();
        train_->departure_ = its_now + its_retention;
    }

    train_->buffer_->insert(train_->buffer_->end(), _data, _data + _size);
    train_->passengers_.insert(its_identifier);
    // update train's minimal debounce time if necessary
    if (its_debouncing < train_->minimal_debounce_time_) {
        train_->minimal_debounce_time_ = its_debouncing;
    }
    // update train's minimal retention time if necessary
    if (its_retention < train_->minimal_max_retention_time_) {
        train_->minimal_max_retention_time_ = its_retention;
    }
    
    // restart dispatch timer with next departure time
    start_dispatch_timer(its_now);
    
    return true;
}

void get_configured_times_from_endpoint(
            service_t _service, method_t _method,
            std::chrono::nanoseconds *_debouncing,
            std::chrono::nanoseconds *_maximum_retention) {
    *_debouncing = npdu_default_debounce_requ_;
    *_maximum_retention = npdu_default_max_retention_requ_;
}

void cancel_dispatch_timer() {
    boost::system::error_code ec;
    dispatch_timer_.cancel(ec);
}

// implemented in client_endpoint_impl
// checks whether adding the given message would push the total size of the pending queue over the limit
bool check_queue_limit(const uint8_t *_data, std::uint32_t _size) {
    if (queue_limit_ != QUEUE_SIZE_UNLIMITED
    && queue_size_ + _size > queue_limit_) {
        std::cout << "cei::check_queue_limit: queue size limit ("
                  << queue_limit_ << ") reached. Dropping message. "
                  << "queue size: " << queue_size_
                  << ", data size: " << _size << std::endl;
        return false;
    }
    return true;
}

// implemented in client_endpoint_impl
// checks whether the given message needs to be segmented
cms_ret_e check_message_size(const std::uint8_t * const _data, std::uint32_t _size) {
    cms_ret_e ret(cms_ret_e::MSG_OK);
    // SOME/IP-TP segmentation is not used over TCP
    return ret;
}

// implemented in client_endpoint_impl
// adds the current train to the departure queue
void schedule_train() {
    if (has_last_departure_) {
        if (last_departure_ + train_->minimal_debounce_time_ > train_->departure_) {
            train_->departure_ = (last_departure_ + train_->minimal_debounce_time_);
        }
    }
    std::cout << "cei::schedule_train add current train into dispatched_trains_" << std::endl;
    dispatched_trains_[train_->departure_].push_back(train_);
    train_.reset(new train());
}

// implemented in client_endpoint_impl
void start_dispatch_timer(const std::chrono::steady_clock::time_point &_now) {
    // Choose the next train
    std::shared_ptr<train> its_train(train_);
    if (!dispatched_trains_.empty()) {
        auto its_dispatched = dispatched_trains_.begin();
        if (its_dispatched->first < its_train->departure_) {
	        its_train = its_dispatched->second.front();	
        }
    }

    std::chrono::nanoseconds its_offset;
    if (its_train->departure_ > _now) {
        its_offset = std::chrono::duration_cast<std::chrono::nanoseconds>(its_train->departure_ - _now);
    } else {
        its_offset = std::chrono::nanoseconds::zero();
    }
    std::cout << "cei::start_dispatch_timer, expire after " << (its_offset.count() / 1000000) << " ms" << std::endl;
    dispatch_timer_.expires_from_now(its_offset);
    dispatch_timer_.async_wait(std::bind(flush_cbk, std::placeholders::_1));
}

// implemented in client_endpoint_impl
void flush_cbk(boost::system::error_code const & _error) {
    if (!_error) {
        // dispatch_timer_ expired normally
        (void)flush();
    }
}

// implemented in client_endpoint_impl
void flush() {
    bool has_queued(true);
    bool is_current_train(true);
    std::cout << "cei::flush dispatched_trains_ contain " << dispatched_trains_.size() << " trains." << std::endl;
    std::lock_guard<std::recursive_mutex> lock(mutex_);
    std::shared_ptr<train> its_train(train_);
    if (!dispatched_trains_.empty()) {
        auto its_dispatched = dispatched_trains_.begin();
        if (its_dispatched->first <= its_train->departure_) {
            is_current_train = false;
            its_train = its_dispatched->second.front();
            its_dispatched->second.pop_front();  // safe to pop: the train is already stored in its_train
            if (its_dispatched->second.empty()) {
                dispatched_trains_.erase(its_dispatched);
            }
        }
    }

    if (!its_train->buffer_->empty()) {
        queue_train(its_train);

        // Reset current train if necessary
        if (is_current_train) {
            std::cout << "cei::flush reset current train " << std::endl;
            // its_train.reset();     // lcb modify  (annotate)
            train_.reset(new train());    // lcb modify  (add)
        }
    } else {
        has_queued = false;
    }

    if (!is_current_train || !dispatched_trains_.empty()) {
        auto its_now(std::chrono::steady_clock::now());
        start_dispatch_timer(its_now);
    }
}

void update_last_departure() {
    last_departure_ = std::chrono::steady_clock::now();
    has_last_departure_ = true;
}

std::pair<message_buffer_ptr_t, uint32_t> get_front() {
    std::pair<message_buffer_ptr_t, uint32_t> its_entry;
    if (queue_.size()) {
        its_entry = queue_.front();
    }
    return its_entry;
}

void queue_train(const std::shared_ptr<train> &_train) {
    queue_size_ += _train->buffer_->size();  // update the total number of bytes currently queued
    queue_.emplace_back(_train->buffer_, 0);
    std::cout << "cei::queue_train begin" << std::endl;
    if (!is_sending_ && !queue_.empty()) {
        auto its_entry = get_front();
        if (its_entry.first) {   // message_buffer_ptr is not null
            // is_sending_ = true;   // lcb modify (commented out; now set inside send_queued)
            strand_.dispatch(std::bind(send_queued, its_entry));
        } else {
            std::cout << "cei::queue_train front entry in queue_ is empty" << std::endl;
        }
    } else {
        std::cout << "cei::queue_train send condition not meet, "
                  << "is_sending_:" << (is_sending_ ? "true" : "false") 
                  << "queue_size_: " << queue_.size() << std::endl;
    }
}

// implemented in tcp_client_endpoint_impl
void send_queued(std::pair<message_buffer_ptr_t, uint32_t> &_entry) {
    // depending on configuration a magic cookie would be sent first; omitted here
    // send the actual message
    {
        std::lock_guard<std::mutex> lock(socket_mutex_);
        if (socket_.is_open()) {
            // async_write may internally call async_write_some several times depending on the packet size; after each call the completion condition decides whether to continue
            std::cout << "cei::send_queued buffer size is " << _entry.first->size() << ", first char is [" << (char)((*_entry.first)[0]) << "]"<< std::endl;
            is_sending_ = true;  // lcb modify (added)
            boost::asio::async_write(socket_,    // socket
                                     boost::asio::buffer(*_entry.first),  // buffer
                                     std::bind(write_completion_condition,
                                               std::placeholders::_1,   // error code
                                               std::placeholders::_2,   // bytes transferred so far
                                               _entry.first->size(),    // total number of bytes to send
                                               std::chrono::steady_clock::now()  // start time of the send
                                     ), // completion condition (decides whether the whole _entry.first buffer has been sent)
                                     strand_.wrap(std::bind(send_cbk,
                                                            std::placeholders::_1,  // error code
                                                            std::placeholders::_2,  // bytes transferred
                                                            _entry.first)  // write handler (only invoked once the whole _entry.first buffer has been sent)
                                     ));
            						
        }
    }
}

// implemented in client_endpoint_impl
// called as the completion condition to decide whether the buffer has been fully sent
// returns 0 when sending is finished, otherwise the number of bytes still to be sent
std::size_t write_completion_condition(const boost::system::error_code& _error,
				       std::size_t _bytes_transferred,
				       std::size_t _bytes_to_send,
				       const std::chrono::steady_clock::time_point _start) {
    if (_error) {
        // an error occurred: abort this send by returning 0 remaining bytes, so async_write_some is not called again
        std::cout << "tce::write_completion_condition: "
                  << _error.message() << "(" << std::dec << _error.value() << "), "
                  << "bytes transferred: " << std::dec << _bytes_transferred << ", "
                  << "bytes to send: " << std::dec << _bytes_to_send << ", "
                  << "remote: [" << remote_.address().to_string() << ":" << remote_.port() << "]"
                  << std::endl;
                  return 0;
    }    
    // the original code also logs when the send takes too long; that does not abort the send, so it is omitted here
    // report how many bytes remain for the next write
    return _bytes_to_send - _bytes_transferred;
}
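
For readers unfamiliar with boost::asio's composed write operations: async_write calls the completion condition after every internal async_write_some, and the returned value is the maximum number of bytes the next async_write_some may transfer, with 0 meaning the operation is complete. Returning _bytes_to_send - _bytes_transferred therefore keeps writing until the whole buffer is out, which (apart from the logging) behaves like boost::asio::transfer_all(). A small self-contained sketch over a loopback connection:

#include <boost/asio.hpp>
#include <iostream>
#include <vector>

int main() {
    using boost::asio::ip::tcp;
    boost::asio::io_context io;

    // local loopback pair so the example needs no external server
    tcp::acceptor acceptor(io, tcp::endpoint(tcp::v4(), 0));
    tcp::socket server(io), client(io);
    acceptor.async_accept(server, [](const boost::system::error_code&) {});
    client.connect(tcp::endpoint(boost::asio::ip::address_v4::loopback(),
                                 acceptor.local_endpoint().port()));

    std::vector<uint8_t> buffer(4096, 0x42);
    const std::size_t total = buffer.size();
    boost::asio::async_write(
        client, boost::asio::buffer(buffer),
        // completion condition: 0 means done, otherwise the byte budget for the next write_some
        [total](const boost::system::error_code& ec, std::size_t transferred) -> std::size_t {
            return ec ? 0 : total - transferred;
        },
        // completion handler: runs once the condition has returned 0 (or an error occurred)
        [](const boost::system::error_code& ec, std::size_t transferred) {
            std::cout << "wrote " << transferred << " bytes, error: " << ec.message() << std::endl;
        });

    io.run();
    return 0;
}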

// implemented in client_endpoint_impl
// async_write calls this back once the whole buffer has been sent
void send_cbk(const boost::system::error_code& _error, const std::size_t _bytes, const message_buffer_ptr_t& _send_msg) {
    (void)_bytes;
    if (!_error) {  // the previous message_buffer was sent completely; prepare to send the next one
        std::cout << "cei::send_cbk send last message success, sent bytes is " << _bytes << ", first char is [" << (char)((*_send_msg)[0]) << "]" << std::endl;
        std::lock_guard<std::recursive_mutex> lock(mutex_);
        if (queue_.size() > 0) {
            queue_size_ -= queue_.front().first->size();  // subtract the bytes of the message_buffer that was just sent
            queue_.pop_front();  // remove the sent message_buffer from queue_

            update_last_departure();  // set last_departure_ to the current time point
            std::cout << "remove first message buffer at queue_, current queue_ size is " << queue_.size() << std::endl;
            if (queue_.empty()) {
                is_sending_ = false;
            } else {
                auto its_entry = get_front();
                if (its_entry.first) {
                    send_queued(its_entry);  // send the next message_buffer in queue_
                }
            }
        }
        return;
    } else {
        std::cout << "cei::send_cbk received error (" << _error.message() << "), "
                  << "remote[" << remote_.address().to_string() << ":" << remote_.port() << "]" << std::endl;
        if (_error == boost::asio::error::broken_pipe   // connection dropped abnormally
         || _error == boost::asio::error::not_connected
         || _error == boost::asio::error::bad_descriptor
         || _error == boost::asio::error::no_permission
         || _error == boost::asio::error::operation_aborted) { // connection closed deliberately
            state_ = cei_state_e::CLOSED;
            {
                std::lock_guard<std::recursive_mutex> lock(mutex_);
                queue_.clear();
                queue_size_ = 0;
                was_not_connected_ = true;
                if (_error == boost::asio::error::operation_aborted) {
                    sending_blocked_ = true;         // the close was deliberate, so block further user-initiated send() calls
                    shutdown_and_close_socket(false);   // only close the socket: the user closed the connection on purpose
                } else {
                    shutdown_and_close_socket(true);    // the connection dropped for an external reason, so close and recreate the socket
                    strand_.dispatch(Connect);       // reconnect the socket
                }
            }
        }
        std::lock_guard<std::recursive_mutex> lock(mutex_);
        is_sending_ = false;   // no longer sending from the queue
    }
}

// implemented in endpoint_manager_impl
void on_disconnect() {
    // the services/instances reachable through this endpoint become UNAVAILABLE
}

// implemented in endpoint_manager_impl
void on_connect() {
    set_connected(true);
    // look up in remote_services_ whether this connection maps to the reliable port exposed by a remote service instance;
    // remote_services_ is a nested map holding the reliable/unreliable endpoint information of every remote service/instance,
    // populated when endpoint_manager_impl calls create_remote_client
    // if (!rm_->find_service()) {   // check whether the remote service instance has sent an OFFER; rm_ is routing_manager_impl
        // if the peer never sent an OFFER, go straight to ESTABLISHED
        set_established(true);
    // }
    // reaching this point would mean the remote service instance did send an OFFER
    // const auto its_other_endpoint = its_info->get_endpoint(!endpoint_is_reliable);  // todo

}

int main(int argc, char** argv) {
    cmdline::parser p;
    p.add<std::string>("host", 'h', "dest endpoint ip", true, "127.0.0.1");
    p.add<int>("port", 'p', "dest endpoint port", true, 25556);
    
    p.parse_check(argc, argv);
    
    ip_ = p.get<std::string>("host");
    port_ = p.get<int>("port");
    
    std::cout << "ip:" << ip_ << "," << "port:" << port_ << std::endl;
    
    if (ip_.length() > 0 && port_ > 0) {
    	remote_ = boost::asio::ip::tcp::endpoint(boost::asio::ip::make_address(ip_), port_);
        
        start();  // start tcp client endpoint
        
        io_thread_ = std::thread([]() {
            std::cout << "io thread begin..." << std::endl;
            io_.run();
            std::cout << "io thread end..." << std::endl;
        });
        
        send_thread_ = std::thread([]() {
            static uint8_t buffer[2048] = { 0 };
            buffer[2047] = 10;  // LF (newline)
            static char content = 'a';
            while (true) {
                for(int i = 0; i < 3; i++) {
                    memset(buffer, content, 2046);
                    send(buffer, 2048);
                    content++;
                    if (content == 'd') {
                        content = 'a';
                    }
                    
                }
                sleep(10);
            }
        });
        
        io_thread_.join();
    } else {
        std::cout << "invalid param" << std::endl;
        return -1;
    }

    return 0;
}

CMakeLists.txt

cmake_minimum_required (VERSION 2.8.12)
project (client_endpoint_test)

# the code needs at least C++11 (std::thread, chrono, lambdas) and pthread for std::thread on Linux
add_compile_options(-std=c++11)

add_executable(client_endpoint_test client_endpoint_test.cpp)

target_link_libraries(client_endpoint_test
                      boost_system
                      pthread
                    )
