Anyone familiar with servo motor control knows that a typical servo control system boils down to motor <-> drive <-> controller <-> user program. The EtherCAT master roughly plays the controller's role here, but it is not quite the same as a traditional controller. A traditional controller usually outputs pulses and takes feedback from an encoder or the like (some work directly with analog signals instead). Broadly speaking, the I/O we care about is the motor's status (ON/OFF, start/stop, limit, in-position, home, and so on), velocity, position, displacement, and similar data. A traditional controller obtains all of this by directly accessing its own internal resources, such as I/O ports; underneath, these are essentially registers mapped into an address space. The user program talks to the controller over PCI/PCIe or similar, and in most cases the latency is negligible.
An EtherCAT master, by contrast, does all of its input and output through a network port. That brings plenty of convenience, but also problems. The biggest one: if you want to operate a remote device (say, a servo drive) the way you used to operate registers in your local address space, some mechanism has to map the remote device into local memory. The second is time; industrial control systems place fairly strict demands on timing determinism. Timing will be covered later; here we first look at data synchronization.
Let's start with ecx_SDOwrite and ecx_SDOread. As usual, the matryoshka-doll wrappers have been peeled off (ec_SDOwrite simply forwards to ecx_SDOwrite with the default context), and we go straight to the inner functions:
int ecx_SDOwrite(ecx_contextt *context, uint16 Slave, uint16 Index, uint8 SubIndex,
boolean CA, int psize, const void *p, int Timeout)
{
ec_SDOt *SDOp, *aSDOp;
int wkc, maxdata, framedatasize;
ec_mbxbuft MbxIn, MbxOut;
uint8 cnt, toggle;
boolean NotLast;
const uint8 *hp;
ec_clearmbx(&MbxIn);
/* Empty slave out mailbox if something is in. Timeout set to 0 */
wkc = ecx_mbxreceive(context, Slave, (ec_mbxbuft *)&MbxIn, 0);
ec_clearmbx(&MbxOut);
aSDOp = (ec_SDOt *)&MbxIn;
SDOp = (ec_SDOt *)&MbxOut;
maxdata = context->slavelist[Slave].mbx_l - 0x10; /* data section=mailbox size - 6 mbx - 2 CoE - 8 sdo req */
/* if small data use expedited transfer */
if ((psize <= 4) && !CA)
{
SDOp->MbxHeader.length = htoes(0x000a);
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox counter, used for session handle */
cnt = ec_nextmbxcnt(context->slavelist[Slave].mbx_cnt);
context->slavelist[Slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes(0x000 + (ECT_COES_SDOREQ << 12)); /* number 9bits service upper 4 bits */
SDOp->Command = ECT_SDO_DOWN_EXP | (((4 - psize) << 2) & 0x0c); /* expedited SDO download transfer */
SDOp->Index = htoes(Index);
SDOp->SubIndex = SubIndex;
hp = p;
/* copy parameter data to mailbox */
memcpy(&SDOp->ldata[0], hp, psize);
/* send mailbox SDO download request to slave */
wkc = ecx_mbxsend(context, Slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
if (wkc > 0)
{
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, Slave, (ec_mbxbuft *)&MbxIn, Timeout);
if (wkc > 0)
{
/* response should be CoE, SDO response, correct index and subindex */
if (((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_SDORES) &&
(aSDOp->Index == SDOp->Index) &&
(aSDOp->SubIndex == SDOp->SubIndex))
{
/* all OK */
}
/* unexpected response from slave */
else
{
if (aSDOp->Command == ECT_SDO_ABORT) /* SDO abort frame received */
{
ecx_SDOerror(context, Slave, Index, SubIndex, etohl(aSDOp->ldata[0]));
}
else
{
ecx_packeterror(context, Slave, Index, SubIndex, 1); /* Unexpected frame returned */
}
wkc = 0;
}
}
}
}
else
{
framedatasize = psize;
NotLast = FALSE;
if (framedatasize > maxdata)
{
framedatasize = maxdata; /* segmented transfer needed */
NotLast = TRUE;
}
SDOp->MbxHeader.length = htoes((uint16)(0x0a + framedatasize));
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox counter, used for session handle */
cnt = ec_nextmbxcnt(context->slavelist[Slave].mbx_cnt);
context->slavelist[Slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes(0x000 + (ECT_COES_SDOREQ << 12)); /* number 9bits service upper 4 bits */
if (CA)
{
SDOp->Command = ECT_SDO_DOWN_INIT_CA; /* Complete Access, normal SDO init download transfer */
}
else
{
SDOp->Command = ECT_SDO_DOWN_INIT; /* normal SDO init download transfer */
}
SDOp->Index = htoes(Index);
SDOp->SubIndex = SubIndex;
if (CA && (SubIndex > 1))
{
SDOp->SubIndex = 1;
}
SDOp->ldata[0] = htoel(psize);
hp = p;
/* copy parameter data to mailbox */
memcpy(&SDOp->ldata[1], hp, framedatasize);
hp += framedatasize;
psize -= framedatasize;
/* send mailbox SDO download request to slave */
wkc = ecx_mbxsend(context, Slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
if (wkc > 0)
{
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, Slave, (ec_mbxbuft *)&MbxIn, Timeout);
if (wkc > 0)
{
/* response should be CoE, SDO response, correct index and subindex */
if (((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_SDORES) &&
(aSDOp->Index == SDOp->Index) &&
(aSDOp->SubIndex == SDOp->SubIndex))
{
/* all ok */
maxdata += 7;
toggle = 0;
/* repeat while segments left */
while (NotLast)
{
SDOp = (ec_SDOt *)&MbxOut;
framedatasize = psize;
NotLast = FALSE;
SDOp->Command = 0x01; /* last segment */
if (framedatasize > maxdata)
{
framedatasize = maxdata; /* more segments needed */
NotLast = TRUE;
SDOp->Command = 0x00; /* segments follow */
}
if (!NotLast && (framedatasize < 7))
{
SDOp->MbxHeader.length = htoes(0x0a); /* minimum size */
SDOp->Command = (uint8)(0x01 + ((7 - framedatasize) << 1)); /* last segment reduced octets */
}
else
{
SDOp->MbxHeader.length = htoes((uint16)(framedatasize + 3)); /* data + 2 CoE + 1 SDO */
}
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox counter value */
cnt = ec_nextmbxcnt(context->slavelist[Slave].mbx_cnt);
context->slavelist[Slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes(0x000 + (ECT_COES_SDOREQ << 12)); /* number 9bits service upper 4 bits (SDO request) */
SDOp->Command = SDOp->Command + toggle; /* add toggle bit to command byte */
/* copy parameter data to mailbox */
memcpy(&SDOp->Index, hp, framedatasize);
/* update parameter buffer pointer */
hp += framedatasize;
psize -= framedatasize;
/* send SDO download request */
wkc = ecx_mbxsend(context, Slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
if (wkc > 0)
{
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, Slave, (ec_mbxbuft *)&MbxIn, Timeout);
if (wkc > 0)
{
if (((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_SDORES) &&
((aSDOp->Command & 0xe0) == 0x20))
{
/* all OK, nothing to do */
}
else
{
if (aSDOp->Command == ECT_SDO_ABORT) /* SDO abort frame received */
{
ecx_SDOerror(context, Slave, Index, SubIndex, etohl(aSDOp->ldata[0]));
}
else
{
ecx_packeterror(context, Slave, Index, SubIndex, 1); /* Unexpected frame returned */
}
wkc = 0;
NotLast = FALSE;
}
}
}
toggle = toggle ^ 0x10; /* toggle bit for segment request */
}
}
/* unexpected response from slave */
else
{
if (aSDOp->Command == ECT_SDO_ABORT) /* SDO abort frame received */
{
ecx_SDOerror(context, Slave, Index, SubIndex, etohl(aSDOp->ldata[0]));
}
else
{
ecx_packeterror(context, Slave, Index, SubIndex, 1); /* Unexpected frame returned */
}
wkc = 0;
}
}
}
}
return wkc;
}
int ecx_SDOread(ecx_contextt *context, uint16 slave, uint16 index, uint8 subindex,
boolean CA, int *psize, void *p, int timeout)
{
ec_SDOt *SDOp, *aSDOp;
uint16 bytesize, Framedatasize;
int wkc;
int32 SDOlen;
uint8 *bp;
uint8 *hp;
ec_mbxbuft MbxIn, MbxOut;
uint8 cnt, toggle;
boolean NotLast;
ec_clearmbx(&MbxIn);
/* Empty slave out mailbox if something is in. Timeout set to 0 */
wkc = ecx_mbxreceive(context, slave, (ec_mbxbuft *)&MbxIn, 0);
ec_clearmbx(&MbxOut);
aSDOp = (ec_SDOt *)&MbxIn;
SDOp = (ec_SDOt *)&MbxOut;
SDOp->MbxHeader.length = htoes(0x000a);
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox count value, used as session handle */
cnt = ec_nextmbxcnt(context->slavelist[slave].mbx_cnt);
context->slavelist[slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes(0x000 + (ECT_COES_SDOREQ << 12)); /* number 9bits service upper 4 bits (SDO request) */
if (CA)
{
SDOp->Command = ECT_SDO_UP_REQ_CA; /* upload request complete access */
}
else
{
SDOp->Command = ECT_SDO_UP_REQ; /* upload request normal */
}
SDOp->Index = htoes(index);
if (CA && (subindex > 1))
{
subindex = 1;
}
SDOp->SubIndex = subindex;
SDOp->ldata[0] = 0;
/* send CoE request to slave */
wkc = ecx_mbxsend(context, slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
if (wkc > 0) /* succeeded to place mailbox in slave ? */
{
/* clean mailboxbuffer */
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, slave, (ec_mbxbuft *)&MbxIn, timeout);
if (wkc > 0) /* succeeded to read slave response ? */
{
/* slave response should be CoE, SDO response and the correct index */
if (((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_SDORES) &&
(aSDOp->Index == SDOp->Index))
{
if ((aSDOp->Command & 0x02) > 0)
{
/* expedited frame response */
bytesize = 4 - ((aSDOp->Command >> 2) & 0x03);
if (*psize >= bytesize) /* parameter buffer big enough ? */
{
/* copy parameter in parameter buffer */
memcpy(p, &aSDOp->ldata[0], bytesize);
/* return the real parameter size */
*psize = bytesize;
}
else
{
wkc = 0;
ecx_packeterror(context, slave, index, subindex, 3); /* data container too small for type */
}
}
else
{ /* normal frame response */
SDOlen = etohl(aSDOp->ldata[0]);
/* Does parameter fit in parameter buffer ? */
if (SDOlen <= *psize)
{
bp = p;
hp = p;
/* calculate mailbox transfer size */
Framedatasize = (etohs(aSDOp->MbxHeader.length) - 10);
if (Framedatasize < SDOlen) /* transfer in segments? */
{
/* copy parameter data in parameter buffer */
memcpy(hp, &aSDOp->ldata[1], Framedatasize);
/* increment buffer pointer */
hp += Framedatasize;
*psize = Framedatasize;
NotLast = TRUE;
toggle= 0x00;
while (NotLast) /* segmented transfer */
{
SDOp = (ec_SDOt *)&MbxOut;
SDOp->MbxHeader.length = htoes(0x000a);
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
cnt = ec_nextmbxcnt(context->slavelist[slave].mbx_cnt);
context->slavelist[slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes(0x000 + (ECT_COES_SDOREQ << 12)); /* number 9bits service upper 4 bits (SDO request) */
SDOp->Command = ECT_SDO_SEG_UP_REQ + toggle; /* segment upload request */
SDOp->Index = htoes(index);
SDOp->SubIndex = subindex;
SDOp->ldata[0] = 0;
/* send segmented upload request to slave */
wkc = ecx_mbxsend(context, slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
/* is mailbox transferred to slave ? */
if (wkc > 0)
{
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, slave, (ec_mbxbuft *)&MbxIn, timeout);
/* has slave responded ? */
if (wkc > 0)
{
/* slave response should be CoE, SDO response */
if ((((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_SDORES) &&
((aSDOp->Command & 0xe0) == 0x00)))
{
/* calculate mailbox transfer size */
Framedatasize = etohs(aSDOp->MbxHeader.length) - 3;
if ((aSDOp->Command & 0x01) > 0)
{ /* last segment */
NotLast = FALSE;
if (Framedatasize == 7)
/* subtract unused bytes from frame */
Framedatasize = Framedatasize - ((aSDOp->Command & 0x0e) >> 1);
/* copy to parameter buffer */
memcpy(hp, &(aSDOp->Index), Framedatasize);
}
else /* segments follow */
{
/* copy to parameter buffer */
memcpy(hp, &(aSDOp->Index), Framedatasize);
/* increment buffer pointer */
hp += Framedatasize;
}
/* update parameter size */
*psize += Framedatasize;
}
/* unexpected frame returned from slave */
else
{
NotLast = FALSE;
if ((aSDOp->Command) == ECT_SDO_ABORT) /* SDO abort frame received */
ecx_SDOerror(context, slave, index, subindex, etohl(aSDOp->ldata[0]));
else
ecx_packeterror(context, slave, index, subindex, 1); /* Unexpected frame returned */
wkc = 0;
}
}
}
toggle = toggle ^ 0x10; /* toggle bit for segment request */
}
}
/* non segmented transfer */
else
{
/* copy to parameter buffer */
memcpy(bp, &aSDOp->ldata[1], SDOlen);
*psize = SDOlen;
}
}
/* parameter buffer too small */
else
{
wkc = 0;
ecx_packeterror(context, slave, index, subindex, 3); /* data container too small for type */
}
}
}
/* other slave response */
else
{
if ((aSDOp->Command) == ECT_SDO_ABORT) /* SDO abort frame received */
{
ecx_SDOerror(context, slave, index, subindex, etohl(aSDOp->ldata[0]));
}
else
{
ecx_packeterror(context, slave, index, subindex, 1); /* Unexpected frame returned */
}
wkc = 0;
}
}
}
return wkc;
}
Broadly speaking, SDO access is built on the CoE mailbox mechanism. The flow is roughly: first read back whatever is still sitting in the target node's mailbox, waiting on the corresponding flag in the sync-manager status register, which effectively flushes it; this prevents stale data left over from an earlier exchange from being mistaken for the data we are about to request.
Then the request is packed and written to the mailbox (after the mailbox data is sent out, the master also waits for the slave's acknowledgement), and once it is out, the master waits for the reply. The whole exchange involves several handshakes.
The SDO read path works much the same way; only the command codes and the packed contents differ.
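As a quick illustration of the read side, here is a minimal sketch using the single-context wrapper ec_SDOread; the object 0x6064:00 (the CiA 402 "position actual value") is only an example and assumes slave 1 is a drive that implements it:
uint32 actual_pos = 0;
int size = sizeof(actual_pos);
/* read object 0x6064:00 from slave 1, no Complete Access */
int wkc = ec_SDOread(1, 0x6064, 0x00, FALSE, &size, &actual_pos, EC_TIMEOUTRXM);
if (wkc > 0)
{
   printf("position actual value: %ld (size %d)\n", (long)actual_pos, size);
}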
Because of all those handshakes, this process is fairly slow.
Which is why we see calls like the following:
int EL7031setup(uint16 slave)
{
int retval;
uint16 u16val;
// map velocity
uint16 map_1c12[4] = { 0x0003, 0x1601, 0x1602, 0x1604 };
uint16 map_1c13[3] = { 0x0002, 0x1a01, 0x1a03 };
retval = 0;
// Set PDO mapping using Complete Access
// Strange, writing CA works, reading CA doesn't
// This is a protocol error of the slave.
retval += ec_SDOwrite(slave, 0x1c12, 0x00, TRUE, sizeof(map_1c12), &map_1c12, EC_TIMEOUTSAFE);
retval += ec_SDOwrite(slave, 0x1c13, 0x00, TRUE, sizeof(map_1c13), &map_1c13, EC_TIMEOUTSAFE);
// bug in EL7031 old firmware, CompleteAccess for reading is not supported even if the slave says it is.
ec_slave[slave].CoEdetails &= ~ECT_COEDET_SDOCA;
// set some motor parameters, just as example
u16val = 1200; // max motor current in mA
u16val = 150; // motor coil resistance in 0.01ohm
// set other necessary parameters as needed
// .....
while (EcatError) printf("%s", ec_elist2string());
printf("EL7031 slave %d set, retval = %d\n", slave, retval);
return 1;
}
We will come back to this function later.
In short, ec_SDOwrite and ec_SDOread are mainly used to access "registers" that are not operated on very often. These are logical registers, of course, each corresponding to a logically assigned address; how that logical address and register are resolved inside the target node is the target node's own business.
Process data that has to be exchanged constantly during control obviously cannot go through this mechanism. For that, SOEM provides these two functions:
ec_send_processdata
ec_receive_processdata
Yes, these two functions do exactly what their names say: they transfer the process data.
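In SOEM's simple_test example they are used in the classic cyclic pattern. A trimmed sketch (run stands for the application's own loop flag; timing and error handling are left out):
int run = 1;
int expectedWKC = (ec_group[0].outputsWKC * 2) + ec_group[0].inputsWKC;
int wkc;
while (run)
{
   ec_send_processdata();                        /* push IOmap outputs onto the wire */
   wkc = ec_receive_processdata(EC_TIMEOUTRET);  /* wait for the frame(s) to return, update IOmap inputs */
   if (wkc >= expectedWKC)
   {
      /* inputs in the IOmap are fresh: read them, compute, write new outputs */
   }
   osal_usleep(5000);                            /* cycle time, e.g. 5 ms */
}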
As usual, with the wrappers stripped away, the send path looks like this:
static int ecx_main_send_processdata(ecx_contextt *context, uint8 group, boolean use_overlap_io)
{
uint32 LogAdr;
uint16 w1, w2;
int length;
uint16 sublength;
uint8 idx;
int wkc;
uint8* data;
boolean first=FALSE;
uint16 currentsegment = 0;
uint32 iomapinputoffset;
uint16 DCO;
wkc = 0;
if(context->grouplist[group].hasdc)
{
first = TRUE;
}
/* For overlapping IO map use the biggest */
if(use_overlap_io == TRUE)
{
/* For overlap IOmap make the frame EQ big to biggest part */
length = (context->grouplist[group].Obytes > context->grouplist[group].Ibytes) ?
context->grouplist[group].Obytes : context->grouplist[group].Ibytes;
/* Save the offset used to compensate where to save inputs when frame returns */
iomapinputoffset = context->grouplist[group].Obytes;
}
else
{
length = context->grouplist[group].Obytes + context->grouplist[group].Ibytes;
iomapinputoffset = 0;
}
LogAdr = context->grouplist[group].logstartaddr;
if(length)
{
wkc = 1;
/* LRW blocked by one or more slaves ? */
if(context->grouplist[group].blockLRW)
{
/* if inputs available generate LRD */
if(context->grouplist[group].Ibytes)
{
currentsegment = context->grouplist[group].Isegment;
data = context->grouplist[group].inputs;
length = context->grouplist[group].Ibytes;
LogAdr += context->grouplist[group].Obytes;
/* segment transfer if needed */
do
{
if(currentsegment == context->grouplist[group].Isegment)
{
sublength = (uint16)(context->grouplist[group].IOsegment[currentsegment++] - context->grouplist[group].Ioffset);
}
else
{
sublength = (uint16)context->grouplist[group].IOsegment[currentsegment++];
}
/* get new index */
idx = ecx_getindex(context->port);
w1 = LO_WORD(LogAdr);
w2 = HI_WORD(LogAdr);
DCO = 0;
ecx_setupdatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_LRD, idx, w1, w2, sublength, data);
if(first)
{
/* FPRMW in second datagram */
DCO = ecx_adddatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_FRMW, idx, FALSE,
context->slavelist[context->grouplist[group].DCnext].configadr,
ECT_REG_DCSYSTIME, sizeof(int64), context->DCtime);
first = FALSE;
}
/* send frame */
ecx_outframe_red(context->port, idx);
/* push index and data pointer on stack */
ecx_pushindex(context, idx, data, sublength, DCO);
length -= sublength;
LogAdr += sublength;
data += sublength;
} while (length && (currentsegment < context->grouplist[group].nsegments));
}
/* if outputs available generate LWR */
if(context->grouplist[group].Obytes)
{
data = context->grouplist[group].outputs;
length = context->grouplist[group].Obytes;
LogAdr = context->grouplist[group].logstartaddr;
currentsegment = 0;
/* segment transfer if needed */
do
{
sublength = (uint16)context->grouplist[group].IOsegment[currentsegment++];
if((length - sublength) < 0)
{
sublength = (uint16)length;
}
/* get new index */
idx = ecx_getindex(context->port);
w1 = LO_WORD(LogAdr);
w2 = HI_WORD(LogAdr);
DCO = 0;
ecx_setupdatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_LWR, idx, w1, w2, sublength, data);
if(first)
{
/* FPRMW in second datagram */
DCO = ecx_adddatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_FRMW, idx, FALSE,
context->slavelist[context->grouplist[group].DCnext].configadr,
ECT_REG_DCSYSTIME, sizeof(int64), context->DCtime);
first = FALSE;
}
/* send frame */
ecx_outframe_red(context->port, idx);
/* push index and data pointer on stack */
ecx_pushindex(context, idx, data, sublength, DCO);
length -= sublength;
LogAdr += sublength;
data += sublength;
} while (length && (currentsegment < context->grouplist[group].nsegments));
}
}
/* LRW can be used */
else
{
if (context->grouplist[group].Obytes)
{
data = context->grouplist[group].outputs;
}
else
{
data = context->grouplist[group].inputs;
/* Clear offset, don't compensate for overlapping IOmap if we only got inputs */
iomapinputoffset = 0;
}
/* segment transfer if needed */
do
{
sublength = (uint16)context->grouplist[group].IOsegment[currentsegment++];
/* get new index */
idx = ecx_getindex(context->port);
w1 = LO_WORD(LogAdr);
w2 = HI_WORD(LogAdr);
DCO = 0;
ecx_setupdatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_LRW, idx, w1, w2, sublength, data);
if(first)
{
/* FPRMW in second datagram */
DCO = ecx_adddatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_FRMW, idx, FALSE,
context->slavelist[context->grouplist[group].DCnext].configadr,
ECT_REG_DCSYSTIME, sizeof(int64), context->DCtime);
first = FALSE;
}
/* send frame */
ecx_outframe_red(context->port, idx);
/* push index and data pointer on stack.
* the iomapinputoffset compensate for where the inputs are stored
* in the IOmap if we use an overlapping IOmap. If a regular IOmap
* is used it should always be 0.
*/
ecx_pushindex(context, idx, (data + iomapinputoffset), sublength, DCO);
length -= sublength;
LogAdr += sublength;
data += sublength;
} while (length && (currentsegment < context->grouplist[group].nsegments));
}
}
return wkc;
}
What happens here, roughly, is that after various stages of packing, the data is sent out with ecx_outframe_red. That function is mostly just transmit and buffer bookkeeping. The one thing worth a closer look is the index parameter, which we will come back to later.
The matching receive function:
int ecx_receive_processdata_group(ecx_contextt *context, uint8 group, int timeout)
{
uint8 idx;
int pos;
int wkc = 0, wkc2;
uint16 le_wkc = 0;
int valid_wkc = 0;
int64 le_DCtime;
ec_idxstackT *idxstack;
ec_bufT *rxbuf;
/* just to prevent compiler warning for unused group */
wkc2 = group;
idxstack = context->idxstack;
rxbuf = context->port->rxbuf;
/* get first index */
pos = ecx_pullindex(context);
/* read the same number of frames as send */
while (pos >= 0)
{
idx = idxstack->idx[pos];
wkc2 = ecx_waitinframe(context->port, idx, timeout);
/* check if there is input data in frame */
if (wkc2 > EC_NOFRAME)
{
if((rxbuf[idx][EC_CMDOFFSET]==EC_CMD_LRD) || (rxbuf[idx][EC_CMDOFFSET]==EC_CMD_LRW))
{
if(idxstack->dcoffset[pos] > 0)
{
memcpy(idxstack->data[pos], &(rxbuf[idx][EC_HEADERSIZE]), idxstack->length[pos]);
memcpy(&le_wkc, &(rxbuf[idx][EC_HEADERSIZE + idxstack->length[pos]]), EC_WKCSIZE);
wkc = etohs(le_wkc);
memcpy(&le_DCtime, &(rxbuf[idx][idxstack->dcoffset[pos]]), sizeof(le_DCtime));
*(context->DCtime) = etohll(le_DCtime);
}
else
{
/* copy input data back to process data buffer */
memcpy(idxstack->data[pos], &(rxbuf[idx][EC_HEADERSIZE]), idxstack->length[pos]);
wkc += wkc2;
}
valid_wkc = 1;
}
else if(rxbuf[idx][EC_CMDOFFSET]==EC_CMD_LWR)
{
if(idxstack->dcoffset[pos] > 0)
{
memcpy(&le_wkc, &(rxbuf[idx][EC_HEADERSIZE + idxstack->length[pos]]), EC_WKCSIZE);
/* output WKC counts 2 times when using LRW, emulate the same for LWR */
wkc = etohs(le_wkc) * 2;
memcpy(&le_DCtime, &(rxbuf[idx][idxstack->dcoffset[pos]]), sizeof(le_DCtime));
*(context->DCtime) = etohll(le_DCtime);
}
else
{
/* output WKC counts 2 times when using LRW, emulate the same for LWR */
wkc += wkc2 * 2;
}
valid_wkc = 1;
}
}
/* release buffer */
ecx_setbufstat(context->port, idx, EC_BUF_EMPTY);
/* get next index */
pos = ecx_pullindex(context);
}
ecx_clearindex(context);
/* if no frames has arrived */
if (valid_wkc == 0)
{
return EC_NOFRAME;
}
return wkc;
}
The packing itself is easy to follow once you have glanced at the EtherCAT frame format, especially in these two functions:
int ecx_setupdatagram(ecx_portt *port, void *frame, uint8 com, uint8 idx, uint16 ADP, uint16 ADO, uint16 length, void *data)
{
ec_comt *datagramP;
uint8 *frameP;
frameP = frame;
/* Ethernet header is preset and fixed in frame buffers
EtherCAT header needs to be added after that */
datagramP = (ec_comt*)&frameP[ETH_HEADERSIZE];
datagramP->elength = htoes(EC_ECATTYPE + EC_HEADERSIZE + length);
datagramP->command = com;
datagramP->index = idx;
datagramP->ADP = htoes(ADP);
datagramP->ADO = htoes(ADO);
datagramP->dlength = htoes(length);
ecx_writedatagramdata(&frameP[ETH_HEADERSIZE + EC_HEADERSIZE], com, length, data);
/* set WKC to zero */
frameP[ETH_HEADERSIZE + EC_HEADERSIZE + length] = 0x00;
frameP[ETH_HEADERSIZE + EC_HEADERSIZE + length + 1] = 0x00;
/* set size of frame in buffer array */
port->txbuflength[idx] = ETH_HEADERSIZE + EC_HEADERSIZE + EC_WKCSIZE + length;
return 0;
}
uint16 ecx_adddatagram(ecx_portt *port, void *frame, uint8 com, uint8 idx, boolean more, uint16 ADP, uint16 ADO, uint16 length, void *data)
{
ec_comt *datagramP;
uint8 *frameP;
uint16 prevlength;
frameP = frame;
/* copy previous frame size */
prevlength = (uint16)port->txbuflength[idx];
datagramP = (ec_comt*)&frameP[ETH_HEADERSIZE];
/* add new datagram to ethernet frame size */
datagramP->elength = htoes( etohs(datagramP->elength) + EC_HEADERSIZE + length );
/* add "datagram follows" flag to previous subframe dlength */
datagramP->dlength = htoes( etohs(datagramP->dlength) | EC_DATAGRAMFOLLOWS );
/* set new EtherCAT header position */
datagramP = (ec_comt*)&frameP[prevlength - EC_ELENGTHSIZE];
datagramP->command = com;
datagramP->index = idx;
datagramP->ADP = htoes(ADP);
datagramP->ADO = htoes(ADO);
if (more)
{
/* this is not the last datagram to add */
datagramP->dlength = htoes(length | EC_DATAGRAMFOLLOWS);
}
else
{
/* this is the last datagram in the frame */
datagramP->dlength = htoes(length);
}
ecx_writedatagramdata(&frameP[prevlength + EC_HEADERSIZE - EC_ELENGTHSIZE], com, length, data);
/* set WKC to zero */
frameP[prevlength + EC_HEADERSIZE - EC_ELENGTHSIZE + length] = 0x00;
frameP[prevlength + EC_HEADERSIZE - EC_ELENGTHSIZE + length + 1] = 0x00;
/* set size of frame in buffer array */
port->txbuflength[idx] = prevlength + EC_HEADERSIZE - EC_ELENGTHSIZE + EC_WKCSIZE + length;
/* return offset to data in rx frame
14 bytes smaller than tx frame due to stripping of ethernet header */
return prevlength + EC_HEADERSIZE - EC_ELENGTHSIZE - ETH_HEADERSIZE;
}
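For reference, the datagram header these two functions fill in is the ec_comt struct from ethercattype.h. Roughly (reconstructed from memory; check the header for the authoritative definition):
PACKED_BEGIN
typedef struct PACKED
{
   uint16 elength;   /* length + type field of the EtherCAT frame header */
   uint8  command;   /* EtherCAT command, e.g. EC_CMD_LRW */
   uint8  index;     /* index used by SOEM to match returned frames to tx buffers */
   uint16 ADP;       /* address position (slave address or logical address low word) */
   uint16 ADO;       /* address offset (or logical address high word) */
   uint16 dlength;   /* data length of this datagram, plus the "more datagrams follow" flag */
   uint16 irpt;      /* interrupt field, unused here */
} ec_comt;
PACKED_END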
These two functions only need a quick look; they just assemble datagrams. What does deserve attention is what the data pointer is about:
ecx_setupdatagram(context->port, &(context->port->txbuf[idx]), EC_CMD_LRW, idx, w1, w2, sublength, data);
The data pointer is passed in here, which essentially hands the data over to context->port->txbuf[idx]; the frame is then built on top of that buffer, ready to be sent.
As for data itself, it is handled like this:
data = context->grouplist[group].inputs;
……
data = context->grouplist[group].outputs;
……
data += sublength;
The first two lines give data its initial value after a series of condition checks; the last one advances the pointer after a datagram has been packed, which is clearly the case where several datagrams are needed. In short, data simply ends up pointing into the inputs or outputs memory region.
So what exactly are these inputs and outputs?
In simple_test, the initialization phase calls this function:
ec_config_map(&IOmap);
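For context, in simple_test this call sits in the usual start-up sequence. A trimmed sketch (error handling omitted; ifname is the network interface name supplied by the user):
char IOmap[4096];

ec_init(ifname);                 /* bind a raw socket to the NIC */
ec_config_init(FALSE);           /* enumerate slaves and bring them to PRE-OP */
ec_config_map(&IOmap);           /* build the PDO map, program SMs and FMMUs */
ec_configdc();                   /* configure distributed clocks if present */
ec_statecheck(0, EC_STATE_SAFE_OP, EC_TIMEOUTSTATE * 4);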
Peeling the wrappers off ec_config_map, the inside looks like this:
int ecx_config_map_group(ecx_contextt *context, void *pIOmap, uint8 group)
{
uint16 slave, configadr;
uint8 BitPos;
uint32 LogAddr = 0;
uint32 oLogAddr = 0;
uint32 diff;
uint16 currentsegment = 0;
uint32 segmentsize = 0;
if ((*(context->slavecount) > 0) && (group < context->maxgroup))
{
EC_PRINT("ec_config_map_group IOmap:%p group:%d\n", pIOmap, group);
LogAddr = context->grouplist[group].logstartaddr;
oLogAddr = LogAddr;
BitPos = 0;
context->grouplist[group].nsegments = 0;
context->grouplist[group].outputsWKC = 0;
context->grouplist[group].inputsWKC = 0;
/* Find mappings and program syncmanagers */
ecx_config_find_mappings(context, group);
/* do output mapping of slave and program FMMUs */
for (slave = 1; slave <= *(context->slavecount); slave++)
{
configadr = context->slavelist[slave].configadr;
if (!group || (group == context->slavelist[slave].group))
{
/* create output mapping */
if (context->slavelist[slave].Obits)
{
ecx_config_create_output_mappings (context, pIOmap, group, slave, &LogAddr, &BitPos);
diff = LogAddr - oLogAddr;
oLogAddr = LogAddr;
if ((segmentsize + diff) > (EC_MAXLRWDATA - EC_FIRSTDCDATAGRAM))
{
context->grouplist[group].IOsegment[currentsegment] = segmentsize;
if (currentsegment < (EC_MAXIOSEGMENTS - 1))
{
currentsegment++;
segmentsize = diff;
}
}
else
{
segmentsize += diff;
}
}
}
}
if (BitPos)
{
LogAddr++;
oLogAddr = LogAddr;
BitPos = 0;
if ((segmentsize + 1) > (EC_MAXLRWDATA - EC_FIRSTDCDATAGRAM))
{
context->grouplist[group].IOsegment[currentsegment] = segmentsize;
if (currentsegment < (EC_MAXIOSEGMENTS - 1))
{
currentsegment++;
segmentsize = 1;
}
}
else
{
segmentsize += 1;
}
}
context->grouplist[group].outputs = pIOmap;
context->grouplist[group].Obytes = LogAddr - context->grouplist[group].logstartaddr;
context->grouplist[group].nsegments = currentsegment + 1;
context->grouplist[group].Isegment = currentsegment;
context->grouplist[group].Ioffset = (uint16)segmentsize;
if (!group)
{
context->slavelist[0].outputs = pIOmap;
context->slavelist[0].Obytes = LogAddr -
context->grouplist[group].logstartaddr; /* store output bytes in master record */
}
/* do input mapping of slave and program FMMUs */
for (slave = 1; slave <= *(context->slavecount); slave++)
{
configadr = context->slavelist[slave].configadr;
if (!group || (group == context->slavelist[slave].group))
{
/* create input mapping */
if (context->slavelist[slave].Ibits)
{
ecx_config_create_input_mappings(context, pIOmap, group, slave, &LogAddr, &BitPos);
diff = LogAddr - oLogAddr;
oLogAddr = LogAddr;
if ((segmentsize + diff) > (EC_MAXLRWDATA - EC_FIRSTDCDATAGRAM))
{
context->grouplist[group].IOsegment[currentsegment] = segmentsize;
if (currentsegment < (EC_MAXIOSEGMENTS - 1))
{
currentsegment++;
segmentsize = diff;
}
}
else
{
segmentsize += diff;
}
}
ecx_eeprom2pdi(context, slave); /* set Eeprom control to PDI */
/* User may override automatic state change */
if (context->manualstatechange == 0)
{
/* request safe_op for slave */
ecx_FPWRw(context->port,
configadr,
ECT_REG_ALCTL,
htoes(EC_STATE_SAFE_OP),
EC_TIMEOUTRET3); /* set safeop status */
}
if (context->slavelist[slave].blockLRW)
{
context->grouplist[group].blockLRW++;
}
context->grouplist[group].Ebuscurrent += context->slavelist[slave].Ebuscurrent;
}
}
if (BitPos)
{
LogAddr++;
oLogAddr = LogAddr;
BitPos = 0;
if ((segmentsize + 1) > (EC_MAXLRWDATA - EC_FIRSTDCDATAGRAM))
{
context->grouplist[group].IOsegment[currentsegment] = segmentsize;
if (currentsegment < (EC_MAXIOSEGMENTS - 1))
{
currentsegment++;
segmentsize = 1;
}
}
else
{
segmentsize += 1;
}
}
context->grouplist[group].IOsegment[currentsegment] = segmentsize;
context->grouplist[group].nsegments = currentsegment + 1;
context->grouplist[group].inputs = (uint8 *)(pIOmap) + context->grouplist[group].Obytes;
context->grouplist[group].Ibytes = LogAddr -
context->grouplist[group].logstartaddr -
context->grouplist[group].Obytes;
if (!group)
{
context->slavelist[0].inputs = (uint8 *)(pIOmap) + context->slavelist[0].Obytes;
context->slavelist[0].Ibytes = LogAddr -
context->grouplist[group].logstartaddr -
context->slavelist[0].Obytes; /* store input bytes in master record */
}
EC_PRINT("IOmapSize %d\n", LogAddr - context->grouplist[group].logstartaddr);
return (LogAddr - context->grouplist[group].logstartaddr);
}
return 0;
}
Here (note that in this example only group 0 exists) we find outputs, inputs, Obytes, Ibytes and the logical addresses, everything we need. And indeed, these pointers point straight into the IOmap array.
context->grouplist[group].outputs = pIOmap;
context->grouplist[group].Obytes = LogAddr - context->grouplist[group].logstartaddr;
context->grouplist[group].nsegments = currentsegment + 1;
context->grouplist[group].Isegment = currentsegment;
context->grouplist[group].Ioffset = (uint16)segmentsize;
if (!group)
{
context->slavelist[0].outputs = pIOmap;
context->slavelist[0].Obytes = LogAddr -
context->grouplist[group].logstartaddr; /* store output bytes in master record */
}
In other words, the group's outputs pointer and slavelist[0].outputs both point at the start of IOmap, and the subsequent logical addresses as well as the inputs pointer are computed from that base address.
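This is also why application code can treat each slave's process data as plain memory through ec_slave[n].outputs / ec_slave[n].inputs. A minimal sketch (the idea that the first two output bytes are a control word and the first two input bytes a status word is purely illustrative; the real layout depends on the PDO mapping):
/* write an output value for slave 1 before ec_send_processdata() */
uint16 controlword = 0x000F;
memcpy(ec_slave[1].outputs, &controlword, sizeof(controlword));

/* read an input value back after ec_receive_processdata() */
uint16 statusword;
memcpy(&statusword, ec_slave[1].inputs, sizeof(statusword));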
But we are not done yet, because we also need each slave's Obytes and Ibytes, the number of process-data input and output bytes per slave. Where do those come from?
Have a look at this: ecx_config_find_mappings(context, group);
static void ecx_config_find_mappings(ecx_contextt *context, uint8 group)
{
int thrn, thrc;
uint16 slave;
for (thrn = 0; thrn < EC_MAX_MAPT; thrn++)
{
ecx_mapt[thrn].running = 0;
}
/* find CoE and SoE mapping of slaves in multiple threads */
for (slave = 1; slave <= *(context->slavecount); slave++)
{
if (!group || (group == context->slavelist[slave].group))
{
#if EC_MAX_MAPT > 1
/* multi-threaded version */
while ((thrn = ecx_find_mapt()) < 0)
{
osal_usleep(1000);
}
ecx_mapt[thrn].context = context;
ecx_mapt[thrn].slave = slave;
ecx_mapt[thrn].thread_n = thrn;
ecx_mapt[thrn].running = 1;
osal_thread_create(&(ecx_threadh[thrn]), 128000,
&ecx_mapper_thread, &(ecx_mapt[thrn]));
#else
/* serialised version */
ecx_map_coe_soe(context, slave, 0);
#endif
}
}
/* wait for all threads to finish */
do
{
thrc = ecx_get_threadcount();
if (thrc)
{
osal_usleep(1000);
}
} while (thrc);
/* find SII mapping of slave and program SM */
for (slave = 1; slave <= *(context->slavecount); slave++)
{
if (!group || (group == context->slavelist[slave].group))
{
ecx_map_sii(context, slave);
ecx_map_sm(context, slave);
}
}
}
Inside it, these functions deserve attention:
ecx_map_coe_soe(context, slave, 0);
ecx_map_sii(context, slave);
ecx_map_sm(context, slave);
Let's go through them one by one:
static int ecx_map_coe_soe(ecx_contextt *context, uint16 slave, int thread_n)
{
uint32 Isize, Osize;
int rval;
ecx_statecheck(context, slave, EC_STATE_PRE_OP, EC_TIMEOUTSTATE); /* check state change pre-op */
EC_PRINT(" >Slave %d, configadr %x, state %2.2x\n",
slave, context->slavelist[slave].configadr, context->slavelist[slave].state);
/* execute special slave configuration hook Pre-Op to Safe-OP */
if(context->slavelist[slave].PO2SOconfig) /* only if registered */
{
context->slavelist[slave].PO2SOconfig(slave);
}
if (context->slavelist[slave].PO2SOconfigx) /* only if registered */
{
context->slavelist[slave].PO2SOconfigx(context, slave);
}
/* if slave not found in configlist find IO mapping in slave self */
if (!context->slavelist[slave].configindex)
{
Isize = 0;
Osize = 0;
if (context->slavelist[slave].mbx_proto & ECT_MBXPROT_COE) /* has CoE */
{
rval = 0;
if (context->slavelist[slave].CoEdetails & ECT_COEDET_SDOCA) /* has Complete Access */
{
/* read PDO mapping via CoE and use Complete Access */
rval = ecx_readPDOmapCA(context, slave, thread_n, &Osize, &Isize);
}
if (!rval) /* CA not available or not succeeded */
{
/* read PDO mapping via CoE */
rval = ecx_readPDOmap(context, slave, &Osize, &Isize);
}
EC_PRINT(" CoE Osize:%u Isize:%u\n", Osize, Isize);
}
if ((!Isize && !Osize) && (context->slavelist[slave].mbx_proto & ECT_MBXPROT_SOE)) /* has SoE */
{
/* read AT / MDT mapping via SoE */
rval = ecx_readIDNmap(context, slave, &Osize, &Isize);
context->slavelist[slave].SM[2].SMlength = htoes((uint16)((Osize + 7) / 8));
context->slavelist[slave].SM[3].SMlength = htoes((uint16)((Isize + 7) / 8));
EC_PRINT(" SoE Osize:%u Isize:%u\n", Osize, Isize);
}
context->slavelist[slave].Obits = (uint16)Osize;
context->slavelist[slave].Ibits = (uint16)Isize;
}
return 1;
}
Here PO2SOconfig is a function pointer; in simple_test, after ec_config_init, there is this assignment:
ec_slave[slc].PO2SOconfig = &EL7031setup;
We saw this EL7031setup function earlier. It is exactly the PDO-mapping step, and of course the details differ from one slave model to another.
Put simply, the master tells the slave via SDO: these are the object dictionary entries I want; from now on, pack that data up and load it onto the process-data train for me. After that,
ecx_readPDOmapCA(context, slave, thread_n, &Osize, &Isize);
reads the required byte counts back, and they are stored here:
context->slavelist[slave].Obits = (uint16)Osize;
context->slavelist[slave].Ibits = (uint16)Isize;
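For slaves that do not support Complete Access, the same sync-manager PDO assignment is normally written entry by entry instead. A hedged sketch of that pattern (0x1601 is only an example RxPDO mapping object; slave is the slave number):
uint8  zero  = 0;
uint16 pdo   = 0x1601;  /* example RxPDO mapping object */
uint8  count = 1;

ec_SDOwrite(slave, 0x1c12, 0x00, FALSE, sizeof(zero),  &zero,  EC_TIMEOUTSAFE); /* clear the assignment */
ec_SDOwrite(slave, 0x1c12, 0x01, FALSE, sizeof(pdo),   &pdo,   EC_TIMEOUTSAFE); /* assign the PDO */
ec_SDOwrite(slave, 0x1c12, 0x00, FALSE, sizeof(count), &count, EC_TIMEOUTSAFE); /* write the number of entries */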
The actual IOmap layout is computed here:
static void ecx_config_create_input_mappings(ecx_contextt *context, void *pIOmap,
uint8 group, int16 slave, uint32 * LogAddr, uint8 * BitPos)
{
int BitCount = 0;
int FMMUdone = 0;
int AddToInputsWKC = 0;
uint16 ByteCount = 0;
uint16 FMMUsize = 0;
uint8 SMc = 0;
uint16 EndAddr;
uint16 SMlength;
uint16 configadr;
uint8 FMMUc;
EC_PRINT(" =Slave %d, INPUT MAPPING\n", slave);
configadr = context->slavelist[slave].configadr;
FMMUc = context->slavelist[slave].FMMUunused;
if (context->slavelist[slave].Obits) /* find free FMMU */
{
while (context->slavelist[slave].FMMU[FMMUc].LogStart)
{
FMMUc++;
}
}
/* search for SM that contribute to the input mapping */
while ((SMc < EC_MAXSM) && (FMMUdone < ((context->slavelist[slave].Ibits + 7) / 8)))
{
EC_PRINT(" FMMU %d\n", FMMUc);
while ((SMc < (EC_MAXSM - 1)) && (context->slavelist[slave].SMtype[SMc] != 4))
{
SMc++;
}
EC_PRINT(" SM%d\n", SMc);
context->slavelist[slave].FMMU[FMMUc].PhysStart =
context->slavelist[slave].SM[SMc].StartAddr;
SMlength = etohs(context->slavelist[slave].SM[SMc].SMlength);
ByteCount += SMlength;
BitCount += SMlength * 8;
EndAddr = etohs(context->slavelist[slave].SM[SMc].StartAddr) + SMlength;
while ((BitCount < context->slavelist[slave].Ibits) && (SMc < (EC_MAXSM - 1))) /* more SM for input */
{
SMc++;
while ((SMc < (EC_MAXSM - 1)) && (context->slavelist[slave].SMtype[SMc] != 4))
{
SMc++;
}
/* if addresses from more SM connect use one FMMU otherwise break up in multiple FMMU */
if (etohs(context->slavelist[slave].SM[SMc].StartAddr) > EndAddr)
{
break;
}
EC_PRINT(" SM%d\n", SMc);
SMlength = etohs(context->slavelist[slave].SM[SMc].SMlength);
ByteCount += SMlength;
BitCount += SMlength * 8;
EndAddr = etohs(context->slavelist[slave].SM[SMc].StartAddr) + SMlength;
}
/* bit oriented slave */
if (!context->slavelist[slave].Ibytes)
{
context->slavelist[slave].FMMU[FMMUc].LogStart = htoel(*LogAddr);
context->slavelist[slave].FMMU[FMMUc].LogStartbit = *BitPos;
*BitPos += context->slavelist[slave].Ibits - 1;
if (*BitPos > 7)
{
*LogAddr += 1;
*BitPos -= 8;
}
FMMUsize = (uint16)(*LogAddr - etohl(context->slavelist[slave].FMMU[FMMUc].LogStart) + 1);
context->slavelist[slave].FMMU[FMMUc].LogLength = htoes(FMMUsize);
context->slavelist[slave].FMMU[FMMUc].LogEndbit = *BitPos;
*BitPos += 1;
if (*BitPos > 7)
{
*LogAddr += 1;
*BitPos -= 8;
}
}
/* byte oriented slave */
else
{
if (*BitPos)
{
*LogAddr += 1;
*BitPos = 0;
}
context->slavelist[slave].FMMU[FMMUc].LogStart = htoel(*LogAddr);
context->slavelist[slave].FMMU[FMMUc].LogStartbit = *BitPos;
*BitPos = 7;
FMMUsize = ByteCount;
if ((FMMUsize + FMMUdone)> (int)context->slavelist[slave].Ibytes)
{
FMMUsize = (uint16)(context->slavelist[slave].Ibytes - FMMUdone);
}
*LogAddr += FMMUsize;
context->slavelist[slave].FMMU[FMMUc].LogLength = htoes(FMMUsize);
context->slavelist[slave].FMMU[FMMUc].LogEndbit = *BitPos;
*BitPos = 0;
}
FMMUdone += FMMUsize;
if (context->slavelist[slave].FMMU[FMMUc].LogLength)
{
context->slavelist[slave].FMMU[FMMUc].PhysStartBit = 0;
context->slavelist[slave].FMMU[FMMUc].FMMUtype = 1;
context->slavelist[slave].FMMU[FMMUc].FMMUactive = 1;
/* program FMMU for input */
ecx_FPWR(context->port, configadr, ECT_REG_FMMU0 + (sizeof(ec_fmmut) * FMMUc),
sizeof(ec_fmmut), &(context->slavelist[slave].FMMU[FMMUc]), EC_TIMEOUTRET3);
/* Set flag to add one for an input FMMU,
a single ESC can only contribute once */
AddToInputsWKC = 1;
}
if (!context->slavelist[slave].inputs)
{
if (group)
{
context->slavelist[slave].inputs =
(uint8 *)(pIOmap) +
etohl(context->slavelist[slave].FMMU[FMMUc].LogStart) -
context->grouplist[group].logstartaddr;
}
else
{
context->slavelist[slave].inputs =
(uint8 *)(pIOmap) +
etohl(context->slavelist[slave].FMMU[FMMUc].LogStart);
}
context->slavelist[slave].Istartbit =
context->slavelist[slave].FMMU[FMMUc].LogStartbit;
EC_PRINT(" Inputs %p startbit %d\n",
context->slavelist[slave].inputs,
context->slavelist[slave].Istartbit);
}
FMMUc++;
}
context->slavelist[slave].FMMUunused = FMMUc;
/* Add one WKC for an input if flag is true */
if (AddToInputsWKC)
context->grouplist[group].inputsWKC++;
}
static void ecx_config_create_output_mappings(ecx_contextt *context, void *pIOmap,
uint8 group, int16 slave, uint32 * LogAddr, uint8 * BitPos)
{
int BitCount = 0;
int FMMUdone = 0;
int AddToOutputsWKC = 0;
uint16 ByteCount = 0;
uint16 FMMUsize = 0;
uint8 SMc = 0;
uint16 EndAddr;
uint16 SMlength;
uint16 configadr;
uint8 FMMUc;
EC_PRINT(" OUTPUT MAPPING\n");
FMMUc = context->slavelist[slave].FMMUunused;
configadr = context->slavelist[slave].configadr;
/* search for SM that contribute to the output mapping */
while ((SMc < EC_MAXSM) && (FMMUdone < ((context->slavelist[slave].Obits + 7) / 8)))
{
EC_PRINT(" FMMU %d\n", FMMUc);
while ((SMc < (EC_MAXSM - 1)) && (context->slavelist[slave].SMtype[SMc] != 3))
{
SMc++;
}
EC_PRINT(" SM%d\n", SMc);
context->slavelist[slave].FMMU[FMMUc].PhysStart =
context->slavelist[slave].SM[SMc].StartAddr;
SMlength = etohs(context->slavelist[slave].SM[SMc].SMlength);
ByteCount += SMlength;
BitCount += SMlength * 8;
EndAddr = etohs(context->slavelist[slave].SM[SMc].StartAddr) + SMlength;
while ((BitCount < context->slavelist[slave].Obits) && (SMc < (EC_MAXSM - 1))) /* more SM for output */
{
SMc++;
while ((SMc < (EC_MAXSM - 1)) && (context->slavelist[slave].SMtype[SMc] != 3))
{
SMc++;
}
/* if addresses from more SM connect use one FMMU otherwise break up in multiple FMMU */
if (etohs(context->slavelist[slave].SM[SMc].StartAddr) > EndAddr)
{
break;
}
EC_PRINT(" SM%d\n", SMc);
SMlength = etohs(context->slavelist[slave].SM[SMc].SMlength);
ByteCount += SMlength;
BitCount += SMlength * 8;
EndAddr = etohs(context->slavelist[slave].SM[SMc].StartAddr) + SMlength;
}
/* bit oriented slave */
if (!context->slavelist[slave].Obytes)
{
context->slavelist[slave].FMMU[FMMUc].LogStart = htoel(*LogAddr);
context->slavelist[slave].FMMU[FMMUc].LogStartbit = *BitPos;
*BitPos += context->slavelist[slave].Obits - 1;
if (*BitPos > 7)
{
*LogAddr += 1;
*BitPos -= 8;
}
FMMUsize = (uint16)(*LogAddr - etohl(context->slavelist[slave].FMMU[FMMUc].LogStart) + 1);
context->slavelist[slave].FMMU[FMMUc].LogLength = htoes(FMMUsize);
context->slavelist[slave].FMMU[FMMUc].LogEndbit = *BitPos;
*BitPos += 1;
if (*BitPos > 7)
{
*LogAddr += 1;
*BitPos -= 8;
}
}
/* byte oriented slave */
else
{
if (*BitPos)
{
*LogAddr += 1;
*BitPos = 0;
}
context->slavelist[slave].FMMU[FMMUc].LogStart = htoel(*LogAddr);
context->slavelist[slave].FMMU[FMMUc].LogStartbit = *BitPos;
*BitPos = 7;
FMMUsize = ByteCount;
if ((FMMUsize + FMMUdone)> (int)context->slavelist[slave].Obytes)
{
FMMUsize = (uint16)(context->slavelist[slave].Obytes - FMMUdone);
}
*LogAddr += FMMUsize;
context->slavelist[slave].FMMU[FMMUc].LogLength = htoes(FMMUsize);
context->slavelist[slave].FMMU[FMMUc].LogEndbit = *BitPos;
*BitPos = 0;
}
FMMUdone += FMMUsize;
if (context->slavelist[slave].FMMU[FMMUc].LogLength)
{
context->slavelist[slave].FMMU[FMMUc].PhysStartBit = 0;
context->slavelist[slave].FMMU[FMMUc].FMMUtype = 2;
context->slavelist[slave].FMMU[FMMUc].FMMUactive = 1;
/* program FMMU for output */
ecx_FPWR(context->port, configadr, ECT_REG_FMMU0 + (sizeof(ec_fmmut) * FMMUc),
sizeof(ec_fmmut), &(context->slavelist[slave].FMMU[FMMUc]), EC_TIMEOUTRET3);
/* Set flag to add one for an output FMMU,
a single ESC can only contribute once */
AddToOutputsWKC = 1;
}
if (!context->slavelist[slave].outputs)
{
if (group)
{
context->slavelist[slave].outputs =
(uint8 *)(pIOmap) +
etohl(context->slavelist[slave].FMMU[FMMUc].LogStart) -
context->grouplist[group].logstartaddr;
}
else
{
context->slavelist[slave].outputs =
(uint8 *)(pIOmap) +
etohl(context->slavelist[slave].FMMU[FMMUc].LogStart);
}
context->slavelist[slave].Ostartbit =
context->slavelist[slave].FMMU[FMMUc].LogStartbit;
EC_PRINT(" slave %d Outputs %p startbit %d\n",
slave,
context->slavelist[slave].outputs,
context->slavelist[slave].Ostartbit);
}
FMMUc++;
}
context->slavelist[slave].FMMUunused = FMMUc;
/* Add one WKC for an output if flag is true */
if (AddToOutputsWKC)
context->grouplist[group].outputsWKC++;
}
PACKED_BEGIN
typedef struct PACKED ec_fmmu
{
uint32 LogStart;
uint16 LogLength;
uint8 LogStartbit;
uint8 LogEndbit;
uint16 PhysStart;
uint8 PhysStartBit;
uint8 FMMUtype;
uint8 FMMUactive;
uint8 unused1;
uint16 unused2;
} ec_fmmut;
PACKED_END
So now we know the base address of IOmap, how many slaves there are, how many bytes each slave transfers, and how the data we need is laid out inside it. And indeed, essentially all process-data computation and manipulation revolves around pointers into IOmap. Every cycle, the data in IOmap is pushed out, then we wait for the frame to come back and update IOmap again. Wait, though, isn't there a problem here? If new data arrives while I am working on the data in IOmap, say in the middle of a calculation, and that data gets memcpy'd over from the receive buffer, can there be a conflict? There can, so take care to protect the critical sections when reading and writing this data. For example, you can put the copy from the receive buffer into IOmap and the computation in the same task or the same interrupt handler, or simply guard the code that accesses IOmap with a mutex, by disabling interrupts, or some similar mechanism.
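A minimal sketch of the mutex variant, assuming a pthread environment (proc_mutex, comm_cycle and control_step are the application's own names, not anything SOEM provides):
#include <pthread.h>
#include "ethercat.h"

static pthread_mutex_t proc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* cyclic communication task */
void comm_cycle(void)
{
   ec_send_processdata();
   pthread_mutex_lock(&proc_mutex);
   int wkc = ec_receive_processdata(EC_TIMEOUTRET);  /* inputs are memcpy'd into the IOmap here */
   (void)wkc;
   pthread_mutex_unlock(&proc_mutex);
}

/* application task working on the same IOmap */
void control_step(void)
{
   pthread_mutex_lock(&proc_mutex);
   /* read ec_slave[n].inputs, compute, write ec_slave[n].outputs */
   pthread_mutex_unlock(&proc_mutex);
}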
What else was there?
static int ecx_map_sii(ecx_contextt *context, uint16 slave)
{
uint32 Isize, Osize;
int nSM;
ec_eepromPDOt eepPDO;
Osize = context->slavelist[slave].Obits;
Isize = context->slavelist[slave].Ibits;
if (!Isize && !Osize) /* find PDO in previous slave with same ID */
{
(void)ecx_lookup_mapping(context, slave, &Osize, &Isize);
}
if (!Isize && !Osize) /* find PDO mapping by SII */
{
memset(&eepPDO, 0, sizeof(eepPDO));
Isize = ecx_siiPDO(context, slave, &eepPDO, 0);
EC_PRINT(" SII Isize:%u\n", Isize);
for( nSM=0 ; nSM < EC_MAXSM ; nSM++ )
{
if (eepPDO.SMbitsize[nSM] > 0)
{
context->slavelist[slave].SM[nSM].SMlength = htoes((eepPDO.SMbitsize[nSM] + 7) / 8);
context->slavelist[slave].SMtype[nSM] = 4;
EC_PRINT(" SM%d length %d\n", nSM, eepPDO.SMbitsize[nSM]);
}
}
Osize = ecx_siiPDO(context, slave, &eepPDO, 1);
EC_PRINT(" SII Osize:%u\n", Osize);
for( nSM=0 ; nSM < EC_MAXSM ; nSM++ )
{
if (eepPDO.SMbitsize[nSM] > 0)
{
context->slavelist[slave].SM[nSM].SMlength = htoes((eepPDO.SMbitsize[nSM] + 7) / 8);
context->slavelist[slave].SMtype[nSM] = 3;
EC_PRINT(" SM%d length %d\n", nSM, eepPDO.SMbitsize[nSM]);
}
}
}
context->slavelist[slave].Obits = (uint16)Osize;
context->slavelist[slave].Ibits = (uint16)Isize;
EC_PRINT(" ISIZE:%d %d OSIZE:%d\n",
context->slavelist[slave].Ibits, Isize,context->slavelist[slave].Obits);
return 1;
}
So what is this SII? In short, it is the Slave Information Interface: slave configuration data stored in on-chip flash or an EEPROM. Among the things it holds are the items to be mapped into the process data, so if the required entries have been written into the EEPROM beforehand, we do not have to set up the PDO mapping ourselves; reading the SII is enough to build the map.
Of course, if we have actively configured the PDO mapping via SDO, our settings take precedence over what the SII contains.
Then there is this:
static int ecx_map_sm(ecx_contextt *context, uint16 slave)
{
uint16 configadr;
int nSM;
configadr = context->slavelist[slave].configadr;
EC_PRINT(" SM programming\n");
if (!context->slavelist[slave].mbx_l && context->slavelist[slave].SM[0].StartAddr)
{
ecx_FPWR(context->port, configadr, ECT_REG_SM0,
sizeof(ec_smt), &(context->slavelist[slave].SM[0]), EC_TIMEOUTRET3);
EC_PRINT(" SM0 Type:%d StartAddr:%4.4x Flags:%8.8x\n",
context->slavelist[slave].SMtype[0],
etohs(context->slavelist[slave].SM[0].StartAddr),
etohl(context->slavelist[slave].SM[0].SMflags));
}
if (!context->slavelist[slave].mbx_l && context->slavelist[slave].SM[1].StartAddr)
{
ecx_FPWR(context->port, configadr, ECT_REG_SM1,
sizeof(ec_smt), &context->slavelist[slave].SM[1], EC_TIMEOUTRET3);
EC_PRINT(" SM1 Type:%d StartAddr:%4.4x Flags:%8.8x\n",
context->slavelist[slave].SMtype[1],
etohs(context->slavelist[slave].SM[1].StartAddr),
etohl(context->slavelist[slave].SM[1].SMflags));
}
/* program SM2 to SMx */
for( nSM = 2 ; nSM < EC_MAXSM ; nSM++ )
{
if (context->slavelist[slave].SM[nSM].StartAddr)
{
/* check if SM length is zero -> clear enable flag */
if( context->slavelist[slave].SM[nSM].SMlength == 0)
{
context->slavelist[slave].SM[nSM].SMflags =
htoel( etohl(context->slavelist[slave].SM[nSM].SMflags) & EC_SMENABLEMASK);
}
/* if SM length is non zero always set enable flag */
else
{
context->slavelist[slave].SM[nSM].SMflags =
htoel( etohl(context->slavelist[slave].SM[nSM].SMflags) | ~EC_SMENABLEMASK);
}
ecx_FPWR(context->port, configadr, (uint16)(ECT_REG_SM0 + (nSM * sizeof(ec_smt))),
sizeof(ec_smt), &context->slavelist[slave].SM[nSM], EC_TIMEOUTRET3);
EC_PRINT(" SM%d Type:%d StartAddr:%4.4x Flags:%8.8x\n", nSM,
context->slavelist[slave].SMtype[nSM],
etohs(context->slavelist[slave].SM[nSM].StartAddr),
etohl(context->slavelist[slave].SM[nSM].SMflags));
}
}
if (context->slavelist[slave].Ibits > 7)
{
context->slavelist[slave].Ibytes = (context->slavelist[slave].Ibits + 7) / 8;
}
if (context->slavelist[slave].Obits > 7)
{
context->slavelist[slave].Obytes = (context->slavelist[slave].Obits + 7) / 8;
}
return 1;
}
The SM information was obtained earlier (in ecx_readPDOmapCA or from the SII); here it is simply written out to the slave's SM registers, SM0/SM1 for the mailboxes and SM2 onwards for the process data.
I hope to say more about sync managers another time.
The attentive reader may have noticed that an index parameter keeps showing up in the send/receive path. It is the same index that sits in the datagram header.
SOEM ties this index to the buffer number, so the rx and tx buffers are paired one to one.
During an SDO exchange, the request and reply of each handshake carry the same index and are kept in the buffer belonging to that index.
Hence we find a mechanism like this:
int ecx_getindex(ecx_portt *port)
{
int idx;
int cnt;
EnterCriticalSection(&(port->getindex_mutex));
idx = port->lastidx + 1;
/* index can't be larger than buffer array */
if (idx >= EC_MAXBUF)
{
idx = 0;
}
cnt = 0;
/* try to find unused index */
while ((port->rxbufstat[idx] != EC_BUF_EMPTY) && (cnt < EC_MAXBUF))
{
idx++;
cnt++;
if (idx >= EC_MAXBUF)
{
idx = 0;
}
}
port->rxbufstat[idx] = EC_BUF_ALLOC;
if (port->redstate != ECT_RED_NONE)
port->redport->rxbufstat[idx] = EC_BUF_ALLOC;
port->lastidx = idx;
LeaveCriticalSection(&(port->getindex_mutex));
return idx;
}
Before any data is sent, an index is allocated, and at the same moment the corresponding buffer is marked as EC_BUF_ALLOC.
The name rxbufstat is a little misleading here, because, as just mentioned, the receive and transmit buffers are paired.
Acyclic traffic such as SDO uses this mechanism to reserve the index it needs, wait for the matching reply, and decide by that index whether an incoming frame belongs to it. This avoids the races that would otherwise arise when several tasks, or interrupt handlers, need to transfer data.
For example, suppose a task is in the middle of a request/response exchange when a higher-priority task, or even an interrupt, cuts in and also needs to transfer data. Because the newcomer is allocated a different index, it works on a different buffer. The problem is not entirely gone, though: the frame for an earlier-allocated index is not guaranteed to be sent first, so the receive side has to handle out-of-order returns. The good news is that on reception it is enough to copy the data into the buffer matching the index of the received frame; whoever is waiting just polls the state of that buffer.
Acyclic data does not have to worry much about efficiency and can afford this slow question-and-answer routine; cyclic data (chiefly the process data) cannot.
So on the send side the index and its associated information are pushed onto a FIFO, and the receive side pops them back off. That is what these functions do:
static void ecx_pushindex(ecx_contextt *context, uint8 idx, void *data, uint16 length, uint16 DCO)
{
if(context->idxstack->pushed < EC_MAXBUF)
{
context->idxstack->idx[context->idxstack->pushed] = idx;
context->idxstack->data[context->idxstack->pushed] = data;
context->idxstack->length[context->idxstack->pushed] = length;
context->idxstack->dcoffset[context->idxstack->pushed] = DCO;
context->idxstack->pushed++;
}
}
static int ecx_pullindex(ecx_contextt *context)
{
int rval = -1;
if(context->idxstack->pulled < context->idxstack->pushed)
{
rval = context->idxstack->pulled;
context->idxstack->pulled++;
}
return rval;
}
In addition, once reception is complete, the FIFO is cleared:
static void ecx_clearindex(ecx_contextt *context) {
context->idxstack->pushed = 0;
context->idxstack->pulled = 0;
}
Then there are these two functions:
int ec_RxPDO(uint16 Slave, uint16 RxPDOnumber , int psize, const void *p);
int ec_TxPDO(uint16 slave, uint16 TxPDOnumber , int *psize, void *p, int timeout);
They had me puzzled for a while:
int ecx_TxPDO(ecx_contextt *context, uint16 slave, uint16 TxPDOnumber , int *psize, void *p, int timeout)
{
ec_SDOt *SDOp, *aSDOp;
int wkc;
ec_mbxbuft MbxIn, MbxOut;
uint8 cnt;
uint16 framedatasize;
ec_clearmbx(&MbxIn);
/* Empty slave out mailbox if something is in. Timeout set to 0 */
wkc = ecx_mbxreceive(context, slave, (ec_mbxbuft *)&MbxIn, 0);
ec_clearmbx(&MbxOut);
aSDOp = (ec_SDOt *)&MbxIn;
SDOp = (ec_SDOt *)&MbxOut;
SDOp->MbxHeader.length = htoes(0x02);
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox counter, used for session handle */
cnt = ec_nextmbxcnt(context->slavelist[slave].mbx_cnt);
context->slavelist[slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes((TxPDOnumber & 0x01ff) + (ECT_COES_TXPDO_RR << 12)); /* number 9bits service upper 4 bits */
wkc = ecx_mbxsend(context, slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
if (wkc > 0)
{
/* clean mailboxbuffer */
ec_clearmbx(&MbxIn);
/* read slave response */
wkc = ecx_mbxreceive(context, slave, (ec_mbxbuft *)&MbxIn, timeout);
if (wkc > 0) /* succeeded to read slave response ? */
{
/* slave response should be CoE, TxPDO */
if (((aSDOp->MbxHeader.mbxtype & 0x0f) == ECT_MBXT_COE) &&
((etohs(aSDOp->CANOpen) >> 12) == ECT_COES_TXPDO))
{
/* TxPDO response */
framedatasize = (aSDOp->MbxHeader.length - 2);
if (*psize >= framedatasize) /* parameter buffer big enough ? */
{
/* copy parameter in parameter buffer */
memcpy(p, &aSDOp->Command, framedatasize);
/* return the real parameter size */
*psize = framedatasize;
}
/* parameter buffer too small */
else
{
wkc = 0;
ecx_packeterror(context, slave, 0, 0, 3); /* data container too small for type */
}
}
/* other slave response */
else
{
if ((aSDOp->Command) == ECT_SDO_ABORT) /* SDO abort frame received */
{
ecx_SDOerror(context, slave, 0, 0, etohl(aSDOp->ldata[0]));
}
else
{
ecx_packeterror(context, slave, 0, 0, 1); /* Unexpected frame returned */
}
wkc = 0;
}
}
}
return wkc;
}
int ecx_RxPDO(ecx_contextt *context, uint16 Slave, uint16 RxPDOnumber, int psize, const void *p)
{
ec_SDOt *SDOp;
int wkc, maxdata, framedatasize;
ec_mbxbuft MbxIn, MbxOut;
uint8 cnt;
ec_clearmbx(&MbxIn);
/* Empty slave out mailbox if something is in. Timeout set to 0 */
wkc = ecx_mbxreceive(context, Slave, (ec_mbxbuft *)&MbxIn, 0);
ec_clearmbx(&MbxOut);
SDOp = (ec_SDOt *)&MbxOut;
maxdata = context->slavelist[Slave].mbx_l - 0x08; /* data section=mailbox size - 6 mbx - 2 CoE */
framedatasize = psize;
if (framedatasize > maxdata)
{
framedatasize = maxdata; /* limit transfer */
}
SDOp->MbxHeader.length = htoes((uint16)(0x02 + framedatasize));
SDOp->MbxHeader.address = htoes(0x0000);
SDOp->MbxHeader.priority = 0x00;
/* get new mailbox counter, used for session handle */
cnt = ec_nextmbxcnt(context->slavelist[Slave].mbx_cnt);
context->slavelist[Slave].mbx_cnt = cnt;
SDOp->MbxHeader.mbxtype = ECT_MBXT_COE + MBX_HDR_SET_CNT(cnt); /* CoE */
SDOp->CANOpen = htoes((RxPDOnumber & 0x01ff) + (ECT_COES_RXPDO << 12)); /* number 9bits service upper 4 bits */
/* copy PDO data to mailbox */
memcpy(&SDOp->Command, p, framedatasize);
/* send mailbox RxPDO request to slave */
wkc = ecx_mbxsend(context, Slave, (ec_mbxbuft *)&MbxOut, EC_TIMEOUTTXM);
return wkc;
}
Handshaking through the mailbox and then moving data this way is clearly not the normal route for process data, but the EtherCAT protocol does support it.
Once again, what I have written here may not be entirely accurate; corrections are welcome.