static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
        gb->buf_buf = kmalloc(size, GFP_KERNEL);
        if (gb->buf_buf == NULL)
                return -ENOMEM;

        gb->buf_size = size;
        gb->buf_put = gb->buf_buf;
        gb->buf_get = gb->buf_buf;

        return 0;
}
Allocate a block of memory for a gs_buf object. struct gs_buf is defined as:
struct gs_buf {
        unsigned        buf_size;
        char            *buf_buf;
        char            *buf_get;
        char            *buf_put;
};
gs_buf implements a circular (ring) buffer. buf_size is the size of the region, and kmalloc allocates buf_size bytes for it. buf_buf points to the start address of the ring, buf_put is the "tail" where new data is loaded, and buf_get is the "head" from which data is removed.
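Two states fall straight out of this layout. A minimal sketch of the invariants (these helper predicates are added here for illustration; they are not part of u_serial.c):

/* Illustrative predicates, not part of the driver. */
static inline bool gs_buf_is_empty(struct gs_buf *gb)
{
        return gb->buf_put == gb->buf_get;      /* tail caught up with head */
}

static inline bool gs_buf_is_full(struct gs_buf *gb)
{
        /* full means the tail sits one byte behind the head: one byte is
         * deliberately wasted so that full and empty never look the same */
        return gb->buf_put + 1 == gb->buf_get ||
               (gb->buf_get == gb->buf_buf &&
                gb->buf_put == gb->buf_buf + gb->buf_size - 1);
}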
Every allocation implies a matching release. By the kernel's symmetry convention, kmalloc must be paired with kfree, otherwise the memory leaks and the kernel cannot run stably.
static void gs_buf_free(struct gs_buf *gb)
{
        kfree(gb->buf_buf);
        gb->buf_buf = NULL;
}
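A minimal usage sketch of this alloc/free symmetry (the caller, its name, and the 8192-byte size are hypothetical, purely to illustrate the pairing):

/* Hypothetical caller, only to show the kmalloc/kfree pairing. */
static int example_port_setup(struct gs_buf *gb)
{
        int status;

        status = gs_buf_alloc(gb, 8192);        /* kmalloc happens inside */
        if (status)
                return status;                  /* -ENOMEM: nothing to undo */

        /* ... put/get data through the ring buffer ... */

        gs_buf_free(gb);                        /* matching kfree */
        return 0;
}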
Operating on a ring buffer is a classic marriage of data structure and algorithm. Following the standard data-structures playbook, we now have initialization (create) and release (delete); naturally, modify and query operations are needed too.
First, an unusual kind of delete: clearing the data. gs_buf_free is a scorched-earth delete; the one below spares the buffer itself so it can live to fight another day. Put precisely, it empties the ring buffer.
static void gs_buf_clear(struct gs_buf *gb)
{
        gb->buf_get = gb->buf_put;
        /* equivalent to a get of all data available */
}
Insert: putting data into the buffer.
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
        unsigned len;

        len = gs_buf_space_avail(gb);
        if (count > len)
                count = len;

        if (count == 0)
                return 0;

        len = gb->buf_buf + gb->buf_size - gb->buf_put;
        if (count > len) {
                memcpy(gb->buf_put, buf, len);
                memcpy(gb->buf_buf, buf+len, count - len);
                gb->buf_put = gb->buf_buf + count - len;
        } else {
                memcpy(gb->buf_put, buf, count);
                if (count < len)
                        gb->buf_put += count;
                else /* count == len */
                        gb->buf_put = gb->buf_buf;
        }

        return count;
}
Copy data into the ring buffer. If the incoming byte count (count) exceeds the free space available (len), the write is truncated so the buffer is at most filled. Because the storage is circular, we also have to consider where the free space lies. If count fits within the contiguous region from the "tail" to the end of the allocation (len = gb->buf_buf + gb->buf_size - gb->buf_put), a single memcpy(gb->buf_put, buf, count); suffices. Otherwise the free space is split across the two ends of the allocation and the copy is done in two steps. Straighten the ring out and picture it as a row of cells: cells marked × hold data, empty cells are free, and each cell is one byte. To store 5 bytes when only 3 remain after buf_put, first fill those 3 bytes with memcpy(gb->buf_put, buf, len); then place the remaining data at the front of the buffer with memcpy(gb->buf_buf, buf+len, count - len);.
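Here is that split copy re-enacted as a standalone userspace program with concrete numbers (an 8-byte ring with buf_put three bytes from the end; the two memcpy calls mirror the wrap branch of gs_buf_put, everything else is scaffolding):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char ring[8] = {0};
        char *buf_buf = ring;
        char *buf_put = ring + 5;       /* only 3 contiguous bytes remain */
        const char *src = "ABCDE";
        unsigned count = 5;
        unsigned len = buf_buf + sizeof(ring) - buf_put;    /* len == 3 */

        if (count > len) {              /* free space wraps: two copies */
                memcpy(buf_put, src, len);               /* "ABC" at the end   */
                memcpy(buf_buf, src + len, count - len); /* "DE" at the front  */
                buf_put = buf_buf + count - len;         /* new tail: ring + 2 */
        }

        for (int i = 0; i < 8; i++)     /* prints "DE...ABC" */
                putchar(ring[i] ? ring[i] : '.');
        printf("  new put offset: %ld\n", (long)(buf_put - buf_buf));
        return 0;
}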
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
        unsigned len;

        len = gs_buf_data_avail(gb);
        if (count > len)
                count = len;

        if (count == 0)
                return 0;

        len = gb->buf_buf + gb->buf_size - gb->buf_get;
        if (count > len) {
                memcpy(buf, gb->buf_get, len);
                memcpy(buf+len, gb->buf_buf, count - len);
                gb->buf_get = gb->buf_buf + count - len;
        } else {
                memcpy(buf, gb->buf_get, count);
                if (count < len)
                        gb->buf_get += count;
                else /* count == len */
                        gb->buf_get = gb->buf_buf;
        }

        return count;
}
Retrieving data follows exactly the same idea as storing it; the only change is that buf_put becomes buf_get.
Ring: the buffer is circular. What does circular mean? Think of the data as points along the circumference of a circle. The size of each point and the circumference are both known, so the number of points on the circle goes without saying. Now paint the points one by one, finishing a full lap before switching to the next color. After painting for a while you will want to know how much is painted and how much remains, and the two helpers below answer exactly that.
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
        return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}
Returns how much is painted, i.e., how many bytes of data the buffer currently holds.
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
        return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
Returns how much is left unpainted, i.e., how many bytes of free space remain for new data. Note the extra -1: one byte is always sacrificed so that a full buffer (put one short of get) never looks the same as an empty one (put equal to get).
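Plugging concrete numbers into the two formulas makes the modular arithmetic visible (offsets instead of pointers, buf_size = 8; the values are arbitrary):

/* Worked example with buf_size = 8:
 *
 *   put = 2, get = 5  ->  data_avail  = (8 + 2 - 5) % 8     = 5
 *                         space_avail = (8 + 5 - 2 - 1) % 8 = 2
 *
 *   put = get = 0     ->  data_avail  = 0    (empty)
 *                         space_avail = 7    (never 8: one byte reserved)
 *
 * In every state, data_avail + space_avail == buf_size - 1. */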
Next, generate a USB request so that a data transfer can be initiated.
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, kmalloc_flags);

        if (req != NULL) {
                req->length = len;
                req->buf = kmalloc(len, kmalloc_flags);
                if (req->buf == NULL) {
                        usb_ep_free_request(ep, req);
                        return NULL;
                }
        }

        return req;
}
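By the same symmetry rule, this allocation has a matching teardown: free the data buffer first, then return the request to the endpoint that created it. In u_serial.c the counterpart is gs_free_req, which does essentially this:

/* Sketch of the matching teardown (u_serial.c's gs_free_req). */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
        kfree(req->buf);
        usb_ep_free_request(ep, req);
}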
usb_ep_alloc_request() is the true creator of the req. It takes an ep, and the ep comes from USB: in the USB world, an endpoint is both the source and the destination of data, which boards the pipe and rides it to the next ep. usb_ep_alloc_request() lives in drivers/usb/gadget/udc/core.c:
struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
                                         gfp_t gfp_flags)
{
        struct usb_request *req = NULL;

        req = ep->ops->alloc_request(ep, gfp_flags);

        trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);

        return req;
}
udc? Ah, USB device controller. In the end this simply calls ep->ops->alloc_request(ep, gfp_flags), and where those ops come from is a story that begins with ep initialization. With the groundwork laid, let's look at how data is sent and received.
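What hides behind ep->ops->alloc_request is controller-specific. A sketch of the shape such an implementation typically takes (the my_udc_* names and wrapper struct are hypothetical):

/* Hypothetical UDC-side implementation: real controllers wrap
 * struct usb_request in their own bookkeeping structure like this. */
struct my_udc_request {
        struct usb_request      req;    /* what the gadget layer sees */
        struct list_head        queue;  /* the controller's pending list */
};

static struct usb_request *
my_udc_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
        struct my_udc_request *mreq;

        mreq = kzalloc(sizeof(*mreq), gfp_flags);
        if (!mreq)
                return NULL;

        INIT_LIST_HEAD(&mreq->queue);
        return &mreq->req;              /* the caller only ever sees this */
}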
Pull pending data out of the write ring buffer and into a packet buffer:
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
        unsigned len;

        len = gs_buf_data_avail(&port->port_write_buf);
        if (len < size)
                size = len;
        if (size != 0)
                size = gs_buf_get(&port->port_write_buf, packet, size);
        return size;
}
For the serial port this is the transmit path: take a req from the pool, copy data from the ring buffer into the req's buffer (gs_send_packet), queue the req on the IN endpoint (usb_ep_queue), and wake up the tty.
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
        struct list_head        *pool = &port->write_pool;
        struct usb_ep           *in;
        int                     status = 0;
        bool                    do_tty_wake = false;

        if (!port->port_usb)
                return status;

        in = port->port_usb->in;

        while (!port->write_busy && !list_empty(pool)) {
                struct usb_request      *req;
                int                     len;

                if (port->write_started >= QUEUE_SIZE)
                        break;

                req = list_entry(pool->next, struct usb_request, list);
                len = gs_send_packet(port, req->buf, in->maxpacket);
                if (len == 0) {
                        wake_up_interruptible(&port->drain_wait);
                        break;
                }
                do_tty_wake = true;

                req->length = len;
                list_del(&req->list);
                req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

                pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
                          port->port_num, len, *((u8 *)req->buf),
                          *((u8 *)req->buf+1), *((u8 *)req->buf+2));

                /* Drop lock while we call out of driver; completions
                 * could be issued while we do so.  Disconnection may
                 * happen too; maybe immediately before we queue this!
                 *
                 * NOTE that we may keep sending data for a while after
                 * the TTY closed (dev->ioport->port_tty is NULL).
                 */
                port->write_busy = true;
                spin_unlock(&port->port_lock);
                status = usb_ep_queue(in, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
                port->write_busy = false;

                if (status) {
                        pr_debug("%s: %s %s err %d\n",
                                 __func__, "queue", in->name, status);
                        list_add(&req->list, pool);
                        break;
                }

                port->write_started++;

                /* abort immediately after disconnect */
                if (!port->port_usb)
                        break;
        }

        if (do_tty_wake && port->port.tty)
                tty_wakeup(port->port.tty);
        return status;
}
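The loop above stops once QUEUE_SIZE requests are in flight, so something must keep the pump going: the IN endpoint's completion callback returns each finished req to the pool and calls gs_start_tx again. A simplified sketch of that callback, with the error and shutdown handling of the real gs_write_complete trimmed away:

/* Simplified: the real handler also inspects req->status for
 * transient faults and -ESHUTDOWN on disconnect. */
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct gs_port *port = ep->driver_data;

        spin_lock(&port->port_lock);
        list_add(&req->list, &port->write_pool);   /* req is reusable again */
        port->write_started--;
        gs_start_tx(port);      /* more ring-buffer data? queue it */
        spin_unlock(&port->port_lock);
}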
Here is req again, except this time it is queued on the OUT endpoint. The USB world is pleasingly strict: endpoints are unidirectional, OUT cannot do IN and IN cannot do OUT; only endpoint 0 works in both directions, simple and clear. The receive path can be puzzling at first glance: where does this data come from, and how does it just leave? The overlooked detail is that this is USB: gs_start_rx only queues empty requests, and by the time a req completes, the data that came over the bus is already sitting in it.
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
        struct list_head        *pool = &port->read_pool;
        struct usb_ep           *out = port->port_usb->out;

        while (!list_empty(pool)) {
                struct usb_request      *req;
                int                     status;
                struct tty_struct       *tty;

                /* no more rx if closed */
                tty = port->port.tty;
                if (!tty)
                        break;

                if (port->read_started >= QUEUE_SIZE)
                        break;

                req = list_entry(pool->next, struct usb_request, list);
                list_del(&req->list);
                req->length = out->maxpacket;

                /* drop lock while we call out; the controller driver
                 * may need to call us back (e.g. for disconnect)
                 */
                spin_unlock(&port->port_lock);
                status = usb_ep_queue(out, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);

                if (status) {
                        pr_debug("%s: %s %s err %d\n",
                                 __func__, "queue", out->name, status);
                        list_add(&req->list, pool);
                        break;
                }
                port->read_started++;

                /* abort immediately after disconnect */
                if (!port->port_usb)
                        break;
        }
        return port->read_started;
}
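As on the transmit side, a completion callback closes the loop: by the time it runs, the controller has already filled req->buf with req->actual bytes from the host. A simplified sketch of its shape (older versions of u_serial.c defer the tty delivery to a tasklet named push, as here; newer kernels schedule the same work differently):

/* Simplified: park the filled req on read_queue; the deferred push
 * copies its bytes into the tty and eventually recycles the req into
 * read_pool, after which gs_start_rx() can queue it again. */
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct gs_port *port = ep->driver_data;

        spin_lock(&port->port_lock);
        list_add_tail(&req->list, &port->read_queue);
        tasklet_schedule(&port->push);
        spin_unlock(&port->port_lock);
}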