Linuxptp Code Analysis, Using ptp4l and E2E as an Example

I have been studying this topic recently and found very few code-analysis articles about it online, so I am sharing my own notes here. Corrections and feedback are very welcome!

linuxptp code

git clone git://git.code.sf.net/p/linuxptp/code linuxptp

ptp4l.c::main() first handles the command-line arguments, then determines the clock type, then calls clock_create, and finally lets the ports handle their events inside clock_poll. Different clock types lead to different event handlers; this article uses bc_event as the example.
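For orientation, here is a toy model of that flow, with stub functions standing in for the real clock_create()/clock_poll()/clock_destroy() (an assumed outline, not the actual ptp4l.c source, which also handles configuration, the UDS port, signal handling, and so on):

/* Toy model of the ptp4l.c::main() flow described above (stubs only). */
#include <stdio.h>

struct clock { int dummy; };

static int rounds = 3;
static int is_running(void) { return rounds-- > 0; }	/* stub */

static struct clock *clock_create(void)
{
	/* stub: the real clock_create() builds the clock and opens its ports */
	static struct clock c;
	return &c;
}

static int clock_poll(struct clock *c)
{
	/* stub: the real clock_poll() polls all port fds and dispatches the
	 * events to each port (bc_event for an ordinary/boundary clock) */
	(void)c;
	printf("clock_poll: letting the ports handle their events\n");
	return 0;
}

static void clock_destroy(struct clock *c) { (void)c; }

int main(void)
{
	struct clock *clock;

	/* 1. command-line and config parsing omitted in this toy ... */

	/* 2. create the clock, which in turn opens the ports */
	clock = clock_create();

	/* 3. main loop: let the ports handle their events */
	while (is_running()) {
		if (clock_poll(clock))
			break;
	}

	clock_destroy(clock);
	return 0;
}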

port.c::bc_event
My description here may not be perfectly accurate, so please bear with me. As I see it, the function breaks into a few parts: the first is the switch (fd_index), where the port actively sends messages; the middle part receives and parses an incoming message; and the last is the switch (msg_type(msg)), which reacts to the received message. This article follows the E2E flow; for background see 1588的E2E链路延迟测量机制 (an article on the IEEE 1588 E2E link-delay measurement mechanism). I recommend keeping the four-step E2E diagram at hand while reading, it makes things much clearer. I also drew a diagram following the order of this article for reference, and a skeleton of bc_event follows it below.

[Figure: the four-step E2E message exchange, drawn following the order of this article]
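To make those three parts concrete before walking through the individual handlers, here is a rough skeleton of the structure (a self-contained toy outline with its own stub enums and print statements, not the real port.c::bc_event body):

/* Toy skeleton of the three parts of port.c::bc_event() described above. */
#include <stdio.h>

enum fd_index { FD_SYNC_TX_TIMER, FD_DELAY_TIMER, FD_EVENT };	/* subset */
enum msg_type { SYNC, FOLLOW_UP, DELAY_REQ, DELAY_RESP };	/* subset */

static void bc_event_outline(enum fd_index fd_index, enum msg_type type)
{
	/* Part 1: a timer fd fired -> the port actively transmits something */
	switch (fd_index) {
	case FD_SYNC_TX_TIMER:
		printf("port_tx_sync(): master sends Sync (+ Follow up)\n");
		return;
	case FD_DELAY_TIMER:
		printf("port_delay_request(): slave sends Delay_Req\n");
		return;
	default:
		break;
	}

	/* Part 2: an event/general fd is readable -> receive and parse the
	 * message (msg_allocate, transport_recv, msg_post_recv in the real code) */

	/* Part 3: react to the received message */
	switch (type) {
	case SYNC:       printf("process_sync()\n");       break;
	case FOLLOW_UP:  printf("process_follow_up()\n");  break;
	case DELAY_REQ:  printf("process_delay_req()\n");  break;
	case DELAY_RESP: printf("process_delay_resp()\n"); break;
	}
}

int main(void)
{
	bc_event_outline(FD_SYNC_TX_TIMER, SYNC);	/* master side: send Sync */
	bc_event_outline(FD_EVENT, SYNC);		/* slave side: Sync received */
	return 0;
}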

Table of contents (following the four E2E steps)

  • Steps one and two
    • The master sends Sync and Follow up
    • The slave receives Sync
    • The slave receives Follow up
  • Step three
    • The slave sends Delay_Req
  • Step four
    • The master replies with Delay_Resp immediately after receiving Delay_Req
    • The slave receives Delay_Resp (process_delay_resp)
  • Summary
  • References

Steps one and two

The master sends Sync and Follow up

The master sends a Follow up immediately after sending the Sync:
bc_event => port_tx_sync

int port_tx_sync(struct port *p, struct address *dst)
{
	// here msg is the Sync and fup is the Follow up
	struct ptp_message *msg, *fup;
	int err, event;
	// first map the timestamping mode to the transport event type
	switch (p->timestamping) {
	case TS_SOFTWARE:
	case TS_LEGACY_HW:
	case TS_HARDWARE:
		event = TRANS_EVENT;
		break;
	case TS_ONESTEP:
		event = TRANS_ONESTEP;
		break;
	case TS_P2P1STEP:
		event = TRANS_P2P1STEP;
		break;
	default:
		return -1;
	}

	if (p->inhibit_multicast_service && !dst) {
		return 0;
	}
	if (!port_capable(p)) {
		return 0;
	}
	if (port_sync_incapable(p)) {
		return 0;
	}
	// allocate the Sync (msg) and the Follow up (fup)
	msg = msg_allocate();
	if (!msg) {
		return -1;
	}
	fup = msg_allocate();
	if (!fup) {
		msg_put(msg);
		return -1;
	}
	// build the Sync: set the hwts type to match the port's, then fill in the PTP packet header
	msg->hwts.type = p->timestamping;

	msg->header.tsmt               = SYNC | p->transportSpecific;
	msg->header.ver                = PTP_VERSION;
	msg->header.messageLength      = sizeof(struct sync_msg);
	msg->header.domainNumber       = clock_domain_number(p->clock);
	msg->header.sourcePortIdentity = p->portIdentity;
	msg->header.sequenceId         = p->seqnum.sync++;
	msg->header.control            = CTL_SYNC;
	msg->header.logMessageInterval = p->logSyncInterval;
	
	if (p->timestamping != TS_ONESTEP && p->timestamping != TS_P2P1STEP) {
		msg->header.flagField[0] |= TWO_STEP;
	}
	
	if (dst) {
		msg->address = *dst;
		msg->header.flagField[0] |= UNICAST;
		msg->header.logMessageInterval = 0x7f;
	}
	// send the Sync; note that it is sent first and t1 (its TX timestamp) is captured afterwards, to be delivered to the slave in the Follow up message
	err = port_prepare_and_send(p, msg, event);
	if (err) {
		pr_err("port %hu: send sync failed", portnum(p));
		goto out;
	}
	// one-step mode ends here; in two-step mode, continue and send the Follow up
	if (p->timestamping == TS_ONESTEP || p->timestamping == TS_P2P1STEP) {
		goto out;
	} else if (msg_sots_missing(msg)) {
		pr_err("missing timestamp on transmitted sync");
		err = -1;
		goto out;
	}

	/*
	 * Send the follow up message right away.
	 */
	fup->hwts.type = p->timestamping;

	fup->header.tsmt               = FOLLOW_UP | p->transportSpecific;
	fup->header.ver                = PTP_VERSION;
	fup->header.messageLength      = sizeof(struct follow_up_msg);
	fup->header.domainNumber       = clock_domain_number(p->clock);
	fup->header.sourcePortIdentity = p->portIdentity;
	fup->header.sequenceId         = p->seqnum.sync - 1;
	fup->header.control            = CTL_FOLLOW_UP;
	fup->header.logMessageInterval = p->logSyncInterval;
	// this is the one line the Sync did not have: put the TX timestamp obtained above into the Follow up message; that instant is t1
	fup->follow_up.preciseOriginTimestamp = tmv_to_Timestamp(msg->hwts.ts);

	if (dst) {
		fup->address = *dst;
		fup->header.flagField[0] |= UNICAST;
	}
	if (p->follow_up_info && follow_up_info_append(fup)) {
		pr_err("port %hu: append fup info failed", portnum(p));
		err = -1;
		goto out;
	}

	err = port_prepare_and_send(p, fup, TRANS_GENERAL);
	if (err) {
		pr_err("port %hu: send follow up failed", portnum(p));
	}
out:
	msg_put(msg);
	msg_put(fup);
	return err;
}
int port_prepare_and_send(struct port *p, struct ptp_message *msg,
			  enum transport_event event)
{
	int cnt;

	if (msg_pre_send(msg)) {
		return -1;
	}
	if (msg_unicast(msg)) {
		cnt = transport_sendto(p->trp, &p->fda, event, msg);
	} else {
		cnt = transport_send(p->trp, &p->fda, event, msg);
	}
	if (cnt <= 0) {
		return -1;
	}
	port_stats_inc_tx(p, msg);
	if (msg_sots_valid(msg)) {
		ts_add(&msg->hwts.ts, p->tx_timestamp_offset);
	}
	return 0;
}

int transport_send(struct transport *t, struct fdarray *fda,
		   enum transport_event event, struct ptp_message *msg)
{
	int len = ntohs(msg->header.messageLength);
	// t->send dispatches by transport type; note that &msg->hwts is passed in, so the TX timestamp is recorded at send time, which two-step mode relies on
	return t->send(t, fda, event, 0, msg, len, NULL, &msg->hwts);
}
// taking raw_send as the example
static int raw_send(struct transport *t, struct fdarray *fda,
		    enum transport_event event, int peer, void *buf, int len,
		    struct address *addr, struct hw_timestamp *hwts)
{
	struct raw *raw = container_of(t, struct raw, t);
	ssize_t cnt;
	unsigned char pkt[1600], *ptr = buf;
	struct eth_hdr *hdr;
	int fd = -1;

	switch (event) {
	case TRANS_GENERAL:
		fd = fda->fd[FD_GENERAL];
		break;
	case TRANS_EVENT:
	case TRANS_ONESTEP:
	case TRANS_P2P1STEP:
	case TRANS_DEFER_EVENT:
		fd = fda->fd[FD_EVENT];
		break;
	}

	ptr -= sizeof(*hdr);
	len += sizeof(*hdr);

	if (!addr)
		addr = peer ? &raw->p2p_addr : &raw->ptp_addr;

	hdr = (struct eth_hdr *) ptr;
	addr_to_mac(&hdr->dst, addr);
	addr_to_mac(&hdr->src, &raw->src_addr);

	hdr->type = htons(ETH_P_1588);
	//socket send
	cnt = send(fd, ptr, len, 0);
	if (cnt < 1) {
		return -errno;
	}
	/*
	 * Get the time stamp right away.
	 */
	return event == TRANS_EVENT ? sk_receive(fd, pkt, len, NULL, hwts, MSG_ERRQUEUE) : cnt;
}
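The sk_receive(fd, ..., MSG_ERRQUEUE) call at the end is how the transmit timestamp is obtained on Linux: with SO_TIMESTAMPING enabled on the socket, the kernel loops the sent frame back on the socket error queue together with an SCM_TIMESTAMPING control message, and that is where msg->hwts.ts (t1 for a Sync) ultimately comes from. Below is a minimal standalone sketch of that mechanism, using software TX timestamps on a UDP socket for brevity (hardware timestamping additionally needs a SIOCSHWTSTAMP ioctl on the interface and the *_HARDWARE flags; linuxptp's sk.c handles all of that):

/* Minimal sketch of TX timestamp retrieval via the socket error queue.
 * Not linuxptp code: it uses software timestamps on a UDP socket so it
 * can run anywhere. With hardware timestamping the raw stamp would
 * appear in tss.ts[2] instead of ts[0]. */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */
#include <linux/errqueue.h>	/* struct scm_timestamping */

static int enable_tx_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
}

/* Read one looped-back packet from the error queue and extract the
 * timestamp from its SCM_TIMESTAMPING control message. */
static int read_tx_timestamp(int fd, struct timespec *ts)
{
	char data[256], control[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping tss;
			memcpy(&tss, CMSG_DATA(cm), sizeof(tss));
			*ts = tss.ts[0];	/* ts[2] for raw hardware stamps */
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(9) };	/* discard port */
	struct timespec ts;
	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	if (fd < 0 || enable_tx_timestamps(fd) < 0)
		return 1;
	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));

	/* the timestamp shows up on the error queue shortly after send() */
	for (i = 0; i < 100; i++) {
		if (read_tx_timestamp(fd, &ts) == 0) {
			printf("tx timestamp: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);
			return 0;
		}
		usleep(1000);
	}
	return 1;
}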

The slave receives Sync

At this point the Sync and the Follow up have been sent, and the Follow up carries t1. The slave then receives the corresponding messages:
bc_event => process_sync

void process_sync(struct port *p, struct ptp_message *m)
{
	enum syfu_event event;
	switch (p->state) {
	case PS_INITIALIZING:
	case PS_FAULTY:
	case PS_DISABLED:
	case PS_LISTENING:
	case PS_PRE_MASTER:
	case PS_MASTER:
	case PS_GRAND_MASTER:
	case PS_PASSIVE:
		return;
	case PS_UNCALIBRATED:
	case PS_SLAVE:
		break;
	}

	if (check_source_identity(p, m)) {
		return;
	}

	if (!msg_unicast(m) &&
	    m->header.logMessageInterval != p->log_sync_interval) {
		p->log_sync_interval = m->header.logMessageInterval;
		clock_sync_interval(p->clock, p->log_sync_interval);
	}

	m->header.correction += p->asymmetry;
	// the one-step case
	if (one_step(m)) {
		// in one-step mode the Sync itself carries its TX time, so t1 and t2 are both available immediately
		port_synchronize(p, m->header.sequenceId,
				 m->hwts.ts, m->ts.pdu,
				 m->header.correction, 0,
				 m->header.logMessageInterval);
		flush_last_sync(p);
		return;
	}
	// on the first pass p->syfu == SF_EMPTY, so event = SYNC_MISMATCH; both values are used by port_syfufsm() below
	if (p->syfu == SF_HAVE_FUP &&
	    fup_sync_ok(p->last_syncfup, m) &&
	    p->last_syncfup->header.sequenceId == m->header.sequenceId) {
		event = SYNC_MATCH;
	} else {
		event = SYNC_MISMATCH;
	}
	
	port_syfufsm(p, event, m);
}

static void port_syfufsm(struct port *p, enum syfu_event event,
			 struct ptp_message *m)
{
	struct ptp_message *syn, *fup;

	switch (p->syfu) {
	case SF_EMPTY:
		switch (event) {
		case SYNC_MISMATCH:
			msg_get(m);
			// store the Sync message and set p->syfu = SF_HAVE_SYNC, which is used when the Follow up arrives
			p->last_syncfup = m;
			p->syfu = SF_HAVE_SYNC;
			break;
		case FUP_MISMATCH:
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_FUP;
			break;
		case SYNC_MATCH:
			break;
		case FUP_MATCH:
			break;
		}
		break;

	case SF_HAVE_SYNC:
		switch (event) {
		case SYNC_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			break;
		case SYNC_MATCH:
			break;
		case FUP_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_FUP;
			break;
		case FUP_MATCH:
			syn = p->last_syncfup;
			port_synchronize(p, syn->header.sequenceId,
					 syn->hwts.ts, m->ts.pdu,
					 syn->header.correction,
					 m->header.correction,
					 m->header.logMessageInterval);
			msg_put(p->last_syncfup);
			p->syfu = SF_EMPTY;
			break;
		}
		break;

	case SF_HAVE_FUP:
		switch (event) {
		case SYNC_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_SYNC;
			break;
		case SYNC_MATCH:
			fup = p->last_syncfup;
			port_synchronize(p, fup->header.sequenceId,
					 m->hwts.ts, fup->ts.pdu,
					 m->header.correction,
					 fup->header.correction,
					 m->header.logMessageInterval);
			msg_put(p->last_syncfup);
			p->syfu = SF_EMPTY;
			break;
		case FUP_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			break;
		case FUP_MATCH:
			break;
		}
		break;
	}
}
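So on the slave, the normal two-step sequence is: a fresh Sync yields SYNC_MISMATCH and gets stored (SF_EMPTY → SF_HAVE_SYNC), and the matching Follow up then yields FUP_MATCH, triggers port_synchronize(), and returns the state to SF_EMPTY, which is what the next subsection walks through. A toy model of just that cycle (my own simplification, not linuxptp code):

/* Toy model of the syfu state machine for the normal two-step sequence:
 * a Sync with a new sequenceId is stored as SF_HAVE_SYNC, and the
 * matching Follow up triggers synchronization and resets the state. */
#include <stdio.h>
#include <stdint.h>

enum syfu_state { SF_EMPTY, SF_HAVE_SYNC };

struct toy_port {
	enum syfu_state syfu;
	uint16_t saved_seqid;	/* sequenceId of the stored Sync */
};

static void on_sync(struct toy_port *p, uint16_t seqid)
{
	p->saved_seqid = seqid;	/* SYNC_MISMATCH: remember the Sync (t2) */
	p->syfu = SF_HAVE_SYNC;
}

static void on_follow_up(struct toy_port *p, uint16_t seqid)
{
	if (p->syfu == SF_HAVE_SYNC && p->saved_seqid == seqid) {
		printf("FUP_MATCH: synchronize with t1 (from Follow up) "
		       "and t2 (from stored Sync), seq %u\n", (unsigned)seqid);
		p->syfu = SF_EMPTY;	/* back to the start of the cycle */
	}
}

int main(void)
{
	struct toy_port p = { .syfu = SF_EMPTY };
	on_sync(&p, 42);
	on_follow_up(&p, 42);
	return 0;
}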

The slave receives Follow up

bc_event => process_follow_up

void process_follow_up(struct port *p, struct ptp_message *m)
{
	enum syfu_event event;
	switch (p->state) {
	case PS_INITIALIZING:
	case PS_FAULTY:
	case PS_DISABLED:
	case PS_LISTENING:
	case PS_PRE_MASTER:
	case PS_MASTER:
	case PS_GRAND_MASTER:
	case PS_PASSIVE:
		return;
	case PS_UNCALIBRATED:
	case PS_SLAVE:
		break;
	}

	if (check_source_identity(p, m)) {
		return;
	}

	if (p->follow_up_info) {
		struct follow_up_info_tlv *fui = follow_up_info_extract(m);
		if (!fui)
			return;
		clock_follow_up_info(p->clock, fui);
	}
	// continuing from the Sync above: p->syfu == SF_HAVE_SYNC, so for a matching sequenceId event = FUP_MATCH
	if (p->syfu == SF_HAVE_SYNC &&
	    p->last_syncfup->header.sequenceId == m->header.sequenceId) {
		event = FUP_MATCH;
	} else {
		event = FUP_MISMATCH;
	}
	port_syfufsm(p, event, m);
}

static void port_syfufsm(struct port *p, enum syfu_event event,
			 struct ptp_message *m)
{
	struct ptp_message *syn, *fup;

	switch (p->syfu) {
	case SF_EMPTY:
		switch (event) {
		case SYNC_MISMATCH:
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_SYNC;
			break;
		case FUP_MISMATCH:
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_FUP;
			break;
		case SYNC_MATCH:
			break;
		case FUP_MATCH:
			break;
		}
		break;

	case SF_HAVE_SYNC:
		switch (event) {
		case SYNC_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			break;
		case SYNC_MATCH:
			break;
		case FUP_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_FUP;
			break;
		case FUP_MATCH:
			// fetch the stored Sync; p->last_syncfup = m was saved when it arrived
			syn = p->last_syncfup;
			// syn->hwts.ts is t2, the time the Sync reached the slave; m->ts.pdu is t1, the timestamp carried by the Follow up
			port_synchronize(p, syn->header.sequenceId,
					 syn->hwts.ts, m->ts.pdu,
					 syn->header.correction,
					 m->header.correction,
					 m->header.logMessageInterval);
			msg_put(p->last_syncfup);
			// reset p->syfu, so the cycle can start over with the next Sync
			p->syfu = SF_EMPTY;
			break;
		}
		break;

	case SF_HAVE_FUP:
		switch (event) {
		case SYNC_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			p->syfu = SF_HAVE_SYNC;
			break;
		case SYNC_MATCH:
			fup = p->last_syncfup;
			port_synchronize(p, fup->header.sequenceId,
					 m->hwts.ts, fup->ts.pdu,
					 m->header.correction,
					 fup->header.correction,
					 m->header.logMessageInterval);
			msg_put(p->last_syncfup);
			p->syfu = SF_EMPTY;
			break;
		case FUP_MISMATCH:
			port_syfufsm_print_mismatch(p, event, m);
			msg_put(p->last_syncfup);
			msg_get(m);
			p->last_syncfup = m;
			break;
		case FUP_MATCH:
			break;
		}
		break;
	}
}

static void port_synchronize(struct port *p,
			     uint16_t seqid,
			     tmv_t ingress_ts,
			     struct timestamp origin_ts,
			     Integer64 correction1, Integer64 correction2,
			     Integer8 sync_interval)
{
	enum servo_state state, last_state;
	tmv_t t1, t1c, t2, c1, c2;

	port_set_sync_rx_tmo(p);
	// t1, carried in the Follow up
	t1 = timestamp_to_tmv(origin_ts);
	// t2, the ingress timestamp of the Sync
	t2 = ingress_ts;
	c1 = correction_to_tmv(correction1);
	c2 = correction_to_tmv(correction2);
	t1c = tmv_add(t1, tmv_add(c1, c2));

	switch (p->state) {
	case PS_UNCALIBRATED:
	case PS_SLAVE:
		// mainly records t1 and t2 for the slave event monitor
		monitor_sync(p->slave_event_monitor,
			     clock_parent_identity(p->clock), seqid,
			     t1, tmv_add(c1, c2), t2);
		break;
	default:
		break;
	}

	last_state = clock_servo_state(p->clock);
	state = clock_synchronize(p->clock, t2, t1c);
	switch (state) {
	case SERVO_UNLOCKED:
		port_dispatch(p, EV_SYNCHRONIZATION_FAULT, 0);
		if (servo_offset_threshold(clock_servo(p->clock)) != 0 &&
		    sync_interval != p->initialLogSyncInterval) {
			p->logPdelayReqInterval = p->logMinPdelayReqInterval;
			p->logSyncInterval = p->initialLogSyncInterval;
			port_tx_interval_request(p, SIGNAL_NO_CHANGE,
						 SIGNAL_SET_INITIAL,
						 SIGNAL_NO_CHANGE);
		}
		break;
	case SERVO_JUMP:
		port_dispatch(p, EV_SYNCHRONIZATION_FAULT, 0);
		flush_delay_req(p);
		if (p->peer_delay_req) {
			msg_put(p->peer_delay_req);
			p->peer_delay_req = NULL;
		}
		break;
	case SERVO_LOCKED:
		port_dispatch(p, EV_MASTER_CLOCK_SELECTED, 0);
		break;
	case SERVO_LOCKED_STABLE:
		message_interval_request(p, last_state, sync_interval);
		break;
	}
}
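The last piece is what clock_synchronize(p->clock, t2, t1c) does with these values: the offset handed to the clock servo is essentially the master-to-slave difference t2 - t1c minus the currently estimated path delay, which steps three and four below produce. A rough model of that arithmetic (my own sketch in plain nanoseconds, not the linuxptp servo code):

/* Rough model of the offset arithmetic behind clock_synchronize().
 * All values are nanoseconds; path_delay is the slave's current E2E
 * delay estimate (updated in step four). Not linuxptp code. */
#include <stdint.h>
#include <stdio.h>

static int64_t clock_offset_ns(int64_t t1c, int64_t t2, int64_t path_delay)
{
	/* t2 = t1c + path_delay + offset  =>  offset = t2 - t1c - path_delay */
	return t2 - t1c - path_delay;
}

int main(void)
{
	/* toy numbers: Sync sent at t1c = 1000, received at t2 = 1500,
	 * current path delay estimate 300 ns => slave clock is 200 ns ahead */
	printf("offset = %lld ns\n", (long long)clock_offset_ns(1000, 1500, 300));
	return 0;
}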

Step three

The slave sends Delay_Req

After the Sync/Follow up exchange, the slave sends a Delay_Req of its own (triggered from the delay request timer handled in bc_event):
bc_event => port_delay_request

int port_delay_request(struct port *p)
{
	struct ptp_message *msg;

	/* Time to send a new request, forget current pdelay resp and fup */
	if (p->peer_delay_resp) {
		msg_put(p->peer_delay_resp);
		p->peer_delay_resp = NULL;
	}
	if (p->peer_delay_fup) {
		msg_put(p->peer_delay_fup);
		p->peer_delay_fup = NULL;
	}

	if (p->delayMechanism == DM_P2P) {
		return port_pdelay_request(p);
	}

	msg = msg_allocate();
	if (!msg) {
		return -1;
	}

	msg->hwts.type = p->timestamping;

	msg->header.tsmt               = DELAY_REQ | p->transportSpecific;
	msg->header.ver                = PTP_VERSION;
	msg->header.messageLength      = sizeof(struct delay_req_msg);
	msg->header.domainNumber       = clock_domain_number(p->clock);
	msg->header.correction         = -p->asymmetry;
	msg->header.sourcePortIdentity = p->portIdentity;
	msg->header.sequenceId         = p->seqnum.delayreq++;
	msg->header.control            = CTL_DELAY_REQ;
	msg->header.logMessageInterval = 0x7f;

	if (p->hybrid_e2e) {
		struct ptp_message *dst = TAILQ_FIRST(&p->best->messages);
		msg->address = dst->address;
		msg->header.flagField[0] |= UNICAST;
	}
	// port_prepare_and_send() still captures the TX timestamp (t3) into msg->hwts.ts; it is not used here, but is read back from the queued request when the Delay_Resp arrives
	if (port_prepare_and_send(p, msg, TRANS_EVENT)) {
		pr_err("port %hu: send delay request failed", portnum(p));
		goto out;
	}
	if (msg_sots_missing(msg)) {
		pr_err("missing timestamp on transmitted delay request");
		goto out;
	}

	TAILQ_INSERT_HEAD(&p->delay_req, msg, list);

	return 0;
out:
	msg_put(msg);
	return -1;
}

Step four

The master replies with Delay_Resp immediately after receiving Delay_Req

When the master receives a Delay_Req it immediately sends back a Delay_Resp; the purpose of this step is to deliver t4 (the arrival time of the Delay_Req at the master) to the slave.

static int process_delay_req(struct port *p, struct ptp_message *m)
{
	int err, nsm, saved_seqnum_sync;
	struct ptp_message *msg;

	nsm = port_nsm_reply(p, m);

	if (!nsm && p->state != PS_MASTER && p->state != PS_GRAND_MASTER) {
		return 0;
	}

	if (p->delayMechanism == DM_P2P) {
		pr_warning("port %hu: delay request on P2P port", portnum(p));
		return 0;
	}

	msg = msg_allocate();
	if (!msg) {
		return -1;
	}

	msg->hwts.type = p->timestamping;

	msg->header.tsmt               = DELAY_RESP | p->transportSpecific;
	msg->header.ver                = PTP_VERSION;
	msg->header.messageLength      = sizeof(struct delay_resp_msg);
	msg->header.domainNumber       = m->header.domainNumber;
	msg->header.correction         = m->header.correction;
	msg->header.sourcePortIdentity = p->portIdentity;
	msg->header.sequenceId         = m->header.sequenceId;
	msg->header.control            = CTL_DELAY_RESP;
	msg->header.logMessageInterval = p->logMinDelayReqInterval;
	// put t4 into the Delay_Resp: the RX timestamp of the received Delay_Req is t4
	msg->delay_resp.receiveTimestamp = tmv_to_Timestamp(m->hwts.ts);

	msg->delay_resp.requestingPortIdentity = m->header.sourcePortIdentity;

	if (p->hybrid_e2e && msg_unicast(m)) {
		msg->address = m->address;
		msg->header.flagField[0] |= UNICAST;
		msg->header.logMessageInterval = 0x7f;
	}
	if (nsm && net_sync_resp_append(p, msg)) {
		pr_err("port %hu: append NSM failed", portnum(p));
		err = -1;
		goto out;
	}
	err = port_prepare_and_send(p, msg, TRANS_GENERAL);
	if (err) {
		pr_err("port %hu: send delay response failed", portnum(p));
		goto out;
	}
	if (nsm) {
		saved_seqnum_sync = p->seqnum.sync;
		p->seqnum.sync = m->header.sequenceId;
		err = port_tx_sync(p, &m->address);
		p->seqnum.sync = saved_seqnum_sync;
	}
out:
	msg_put(msg);
	return err;
}

The slave receives Delay_Resp (process_delay_resp)

After the master sends the Delay_Resp, the slave receives it and obtains t4. At this point the slave has all of t1 through t4.

void process_delay_resp(struct port *p, struct ptp_message *m)
{
	struct delay_resp_msg *rsp = &m->delay_resp;
	struct ptp_message *req;
	tmv_t c3, t3, t4, t4c;

	if (p->state != PS_UNCALIBRATED && p->state != PS_SLAVE) {
		return;
	}
	if (!pid_eq(&rsp->requestingPortIdentity, &p->portIdentity)) {
		return;
	}
	if (check_source_identity(p, m)) {
		return;
	}
	// find the Delay_Req that this response answers
	TAILQ_FOREACH(req, &p->delay_req, list) {
		if (rsp->hdr.sequenceId == ntohs(req->delay_req.hdr.sequenceId)) {
			break;
		}
	}
	if (!req) {
		return;
	}

	c3 = correction_to_tmv(m->header.correction);
	// t3: the TX timestamp saved when the Delay_Req was sent
	t3 = req->hwts.ts;
	// t4: taken from the Delay_Resp message
	t4 = timestamp_to_tmv(m->ts.pdu);
	t4c = tmv_sub(t4, c3);
	// record t3 and t4 for the slave event monitor
	monitor_delay(p->slave_event_monitor, clock_parent_identity(p->clock),
		      m->header.sequenceId, t3, c3, t4);

	clock_path_delay(p->clock, t3, t4c);

	TAILQ_REMOVE(&p->delay_req, req, list);
	msg_put(req);

	if (p->logMinDelayReqInterval == rsp->hdr.logMessageInterval) {
		return;
	}
	if (msg_unicast(m)) {
		/* Unicast responses have logMinDelayReqInterval set to 0x7F. */
		return;
	}
	if (rsp->hdr.logMessageInterval < -10 ||
	    rsp->hdr.logMessageInterval > 22) {
		pl_info(300, "port %hu: ignore bogus delay request interval 2^%d",
			portnum(p), rsp->hdr.logMessageInterval);
		return;
	}
	p->logMinDelayReqInterval = rsp->hdr.logMessageInterval;
	pr_notice("port %hu: minimum delay request interval 2^%d",
		  portnum(p), p->logMinDelayReqInterval);
	port_set_delay_tmo(p);
}
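clock_path_delay(p->clock, t3, t4c) then combines this new t3/t4 pair with the t1/t2 pair recorded from the latest Sync to update the mean path delay. This is the standard E2E formula; below is a small sketch of the raw arithmetic (my own simplification in plain nanoseconds; linuxptp applies the correctionFields as shown above and filters the value before the servo uses it):

/* The standard E2E mean-path-delay formula, as a toy sketch in plain
 * nanosecond arithmetic. Not linuxptp code. */
#include <stdint.h>
#include <stdio.h>

static int64_t e2e_path_delay_ns(int64_t t1, int64_t t2,
				 int64_t t3, int64_t t4)
{
	/* (t2 - t1) = delay + offset, (t4 - t3) = delay - offset
	 * => mean path delay = ((t2 - t1) + (t4 - t3)) / 2 */
	return ((t2 - t1) + (t4 - t3)) / 2;
}

int main(void)
{
	/* toy numbers: 300 ns one-way delay, slave clock 200 ns ahead */
	int64_t t1 = 1000, t2 = 1500;	/* t2 = t1 + 300 + 200 */
	int64_t t3 = 2000, t4 = 2100;	/* t4 = t3 + 300 - 200 */
	printf("path delay = %lld ns\n",
	       (long long)e2e_path_delay_ns(t1, t2, t3, t4));
	return 0;
}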

Summary

This completes the analysis of the four E2E steps. For the background and theory behind linuxptp, there are many more articles to be found online. You may run into some issues when running ptp4l in hardware timestamping mode; if I get the chance I will write another article about that. I have recently added some material in a follow-up article: 补充:以 ptp4l、E2E 为例的 Linuxptp 代码分析 (a supplement to this analysis).

If you found this article useful, feel free to like, comment, or bookmark it. Many thanks, and goodbye~

References

Implementing IEEE 1588v2 for use in the mobile backhaul
