Since I still have some time over the next couple of days, I'll take the opportunity to write a bit more.
It may not be 100% of cases, but I believe most code that uses a UART looks like this:
<code>
void usart_send_byte(uint8_t ch)
{
    // blocking send: write the byte, then spin until the transmitter is idle
    USART->DATA = ch;
    while (USART->STAT & TX_BUSY);
}

void usart_send(uint8_t *data, uint32_t size)
{
    while (size--) usart_send_byte(*data++);
}
</code>
First of all, without DMA, UART transmit and receive in VSF go through the stream mechanism that VSF defines.
The stream handling is actually very simple: at its core it is just a fifo. The user only operates on this fifo, while the middle layer and the low-level driver code handle the actual transfer automatically.
The stream structure and code:
<code>
struct vsf_stream_t
{
    struct vsf_fifo_t fifo;
    bool overflow;
};

vsf_err_t stream_init(struct vsf_stream_t *stream);
vsf_err_t stream_fini(struct vsf_stream_t *stream);
uint32_t stream_rx(struct vsf_stream_t *stream, struct vsf_buffer_t *buffer);
uint32_t stream_tx(struct vsf_stream_t *stream, struct vsf_buffer_t *buffer);
uint32_t stream_get_data_size(struct vsf_stream_t *stream);
uint32_t stream_get_free_size(struct vsf_stream_t *stream);

vsf_err_t stream_init(struct vsf_stream_t *stream)
{
    stream->overflow = false;
    vsf_fifo_init(&stream->fifo);
    return VSFERR_NONE;
}

vsf_err_t stream_fini(struct vsf_stream_t *stream)
{
    REFERENCE_PARAMETER(stream);
    return VSFERR_NONE;
}

// read up to buffer->size bytes out of the stream, return the number actually read
uint32_t stream_rx(struct vsf_stream_t *stream, struct vsf_buffer_t *buffer)
{
    return vsf_fifo_pop(&stream->fifo, buffer->size, buffer->buffer);
}

// write buffer->size bytes into the stream; flag overflow if the fifo can't take them all
uint32_t stream_tx(struct vsf_stream_t *stream, struct vsf_buffer_t *buffer)
{
    uint32_t tx_size;

    tx_size = vsf_fifo_push(&stream->fifo, buffer->size, buffer->buffer);
    if (tx_size < buffer->size)
    {
        stream->overflow = true;
    }
    return tx_size;
}

uint32_t stream_get_data_size(struct vsf_stream_t *stream)
{
    return vsf_fifo_get_data_length(&stream->fifo);
}

uint32_t stream_get_free_size(struct vsf_stream_t *stream)
{
    return vsf_fifo_get_avail_length(&stream->fifo);
}
</code>
Like the other modules in VSF, the code is designed in an object-oriented style.
stream is really just a thin re-wrapping of the fifo.
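The fifo itself isn't shown in this post. To make the wrapper concrete, here is a minimal ring-buffer sketch of the vsf_fifo_t primitives the stream code calls; this is only an illustration under assumed details (a fixed FIFO_SIZE buffer embedded in the struct), not VSF's actual fifo:
<code>
#include <stdint.h>

// NOT VSF's real fifo -- a minimal ring-buffer sketch of the primitives
// used by the stream code above, with an assumed fixed-size buffer
#define FIFO_SIZE 256

struct vsf_fifo_t
{
    uint8_t buffer[FIFO_SIZE];
    uint32_t head;                  // only advanced by pop (the reader)
    uint32_t tail;                  // only advanced by push (the writer)
};

void vsf_fifo_init(struct vsf_fifo_t *fifo)
{
    fifo->head = fifo->tail = 0;
}

uint32_t vsf_fifo_get_data_length(struct vsf_fifo_t *fifo)
{
    return (fifo->tail >= fifo->head) ? (fifo->tail - fifo->head) :
            (FIFO_SIZE - fifo->head + fifo->tail);
}

uint32_t vsf_fifo_get_avail_length(struct vsf_fifo_t *fifo)
{
    // one slot is kept empty to distinguish "full" from "empty"
    return FIFO_SIZE - 1 - vsf_fifo_get_data_length(fifo);
}

uint32_t vsf_fifo_push(struct vsf_fifo_t *fifo, uint32_t size, uint8_t *data)
{
    uint32_t avail = vsf_fifo_get_avail_length(fifo), i;

    if (size > avail)
    {
        size = avail;               // stream_tx sees the short count and sets overflow
    }
    for (i = 0; i < size; i++)
    {
        fifo->buffer[fifo->tail] = data[i];
        fifo->tail = (fifo->tail + 1) % FIFO_SIZE;
    }
    return size;
}

uint32_t vsf_fifo_pop(struct vsf_fifo_t *fifo, uint32_t size, uint8_t *data)
{
    uint32_t avail = vsf_fifo_get_data_length(fifo), i;

    if (size > avail)
    {
        size = avail;
    }
    for (i = 0; i < size; i++)
    {
        data[i] = fifo->buffer[fifo->head];
        fifo->head = (fifo->head + 1) % FIFO_SIZE;
    }
    return size;
}
</code>
Since head is only written by pop and tail only by push, one interrupt producer and one main-loop consumer (as in the usart driver below) don't corrupt each other. Pushing and popping through the wrapper is then just stream_tx(&s, &buffer) / stream_rx(&s, &buffer) with a struct vsf_buffer_t pointing at your bytes.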
Now let's look at usart_stream:
<code>
struct usart_info_t
{
    uint32_t baudrate;
    uint8_t datalength;
    uint8_t mode;
};

struct usart_stream_info_t
{
    uint8_t usart_index;
    struct vsf_stream_t stream_tx;
    struct vsf_stream_t stream_rx;
    struct usart_info_t usart_info;
};

// rx interrupt callback: push the byte just received into the rx stream
static void usart_stream_onrx(void *p, uint16_t data)
{
    struct usart_stream_info_t *usart_stream = (struct usart_stream_info_t *)p;
    struct vsf_buffer_t buffer;
    uint8_t byte = (uint8_t)data;

    buffer.buffer = &byte;
    buffer.size = 1;
    stream_tx(&usart_stream->stream_rx, &buffer);
}

vsf_err_t usart_stream_init(struct usart_stream_info_t *usart_stream)
{
    stream_init(&usart_stream->stream_tx);
    stream_init(&usart_stream->stream_rx);
    if ((usart_stream->usart_index != IFS_DUMMY_PORT) &&
        (core_interfaces.usart.init(usart_stream->usart_index) ||
         core_interfaces.usart.config_callback(usart_stream->usart_index,
                (void *)usart_stream, NULL, usart_stream_onrx)))
    {
        return VSFERR_FAIL;
    }
    return VSFERR_NONE;
}

vsf_err_t usart_stream_fini(struct usart_stream_info_t *usart_stream)
{
    if (usart_stream->usart_index != IFS_DUMMY_PORT)
    {
        core_interfaces.usart.config_callback(usart_stream->usart_index,
                NULL, NULL, NULL);
        return core_interfaces.usart.fini(usart_stream->usart_index);
    }
    return VSFERR_NONE;
}

vsf_err_t usart_stream_config(struct usart_stream_info_t *usart_stream)
{
    if ((usart_stream->usart_index != IFS_DUMMY_PORT) &&
        core_interfaces.usart.config(usart_stream->usart_index,
                usart_stream->usart_info.baudrate,
                usart_stream->usart_info.datalength,
                usart_stream->usart_info.mode))
    {
        return VSFERR_FAIL;
    }
    return VSFERR_NONE;
}

uint32_t usart_stream_rx(struct usart_stream_info_t *usart_stream,
                struct vsf_buffer_t *buffer)
{
    return stream_rx(&usart_stream->stream_rx, buffer);
}

uint32_t usart_stream_tx(struct usart_stream_info_t *usart_stream,
                struct vsf_buffer_t *buffer)
{
    return stream_tx(&usart_stream->stream_tx, buffer);
}

// called from the main loop: if the port is real, the hardware transmitter
// has finished the previous byte and the tx stream holds data, pop one byte
// and hand it to the hardware
vsf_err_t usart_stream_poll(struct usart_stream_info_t *usart_stream)
{
    if ((usart_stream->usart_index != IFS_DUMMY_PORT) &&
        !core_interfaces.usart.tx_isready(usart_stream->usart_index) &&
        stream_get_data_size(&usart_stream->stream_tx))
    {
        uint8_t data;
        struct vsf_buffer_t buffer;

        buffer.buffer = &data;
        buffer.size = 1;
        if (stream_rx(&usart_stream->stream_tx, &buffer) == buffer.size)
        {
            return core_interfaces.usart.tx(usart_stream->usart_index,
                        (uint16_t)data);
        }
    }
    return VSFERR_NONE;
}
</code>
Most of this code is very simple: it just calls the stream class functions to operate on the stream instances, and calls interfaces->usart to drive the underlying hardware.
usart_stream_onrx is the receive-interrupt handler that usart_stream_init registers with the lower layer; all it does is push the byte just received into the rx stream.
Reception is interrupt-driven mainly because today's ARM chips can run the UART at fairly high speeds (some reach several Mbps, where a new byte arrives every few microseconds), so a polled receiver would very likely drop data.
usart_stream_poll is the interface polled in the main loop. It just checks whether the port is valid, whether the UART has finished sending, and whether the tx stream has data waiting.
If so, it reads one byte from the tx stream and sends it out through the interfaces->usart interface.
With that in place, the application-layer code becomes quite concise:
<code>
struct usart_stream_info_t usart_stream_p0 =
{
    ......
};
static uint8_t usart_tx_buffer[128];

int main(void)
{
    struct vsf_buffer_t buffer;
    uint32_t size;

    interfaces->core.init(NULL);
    usart_stream_init(&usart_stream_p0);
    usart_stream_config(&usart_stream_p0);

    // echo loop: read whatever arrived on the rx stream, queue it back
    // onto the tx stream, and let usart_stream_poll drain it
    while (1)
    {
        buffer.buffer = usart_tx_buffer;
        buffer.size = sizeof(usart_tx_buffer);
        size = usart_stream_rx(&usart_stream_p0, &buffer);
        if (size)
        {
            buffer.size = size;
            usart_stream_tx(&usart_stream_p0, &buffer);
        }
        usart_stream_poll(&usart_stream_p0);
    }
}
</code>
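One design point worth noting: usart_stream_tx only queues bytes into the tx fifo, and nothing reaches the wire until usart_stream_poll runs. So plain transmission (msg and send_hello are hypothetical, the rest is the same API as above) is just:
<code>
static const uint8_t msg[] = "hello\r\n";

void send_hello(void)                       // hypothetical helper
{
    struct vsf_buffer_t b;

    b.buffer = (uint8_t *)msg;              // cast: the buffer field is non-const
    b.size = sizeof(msg) - 1;               // drop the terminating NUL
    usart_stream_tx(&usart_stream_p0, &b);  // queue into the tx stream
    // the bytes leave one per usart_stream_poll() call in the main loop
}
</code>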
Of course, what we've covered here is only the non-DMA usart stream driver, not the DMA-based one.
The poll could also send several bytes at a time: the fifo could expose an interface that returns the largest contiguous block of buffered data for reading; a sketch follows.
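A minimal sketch of that idea, building on the ring-buffer layout assumed earlier (vsf_fifo_peek_consecutive and vsf_fifo_skip are invented names for illustration, not VSF interfaces):
<code>
// peek at the longest contiguous run of buffered data without copying;
// with a linear array the run never wraps within one call
static uint32_t vsf_fifo_peek_consecutive(struct vsf_fifo_t *fifo,
                uint8_t **data)
{
    *data = &fifo->buffer[fifo->head];
    return (fifo->tail >= fifo->head) ? (fifo->tail - fifo->head) :
            (FIFO_SIZE - fifo->head);
}

// discard size bytes after the caller has consumed them
static void vsf_fifo_skip(struct vsf_fifo_t *fifo, uint32_t size)
{
    fifo->head = (fifo->head + size) % FIFO_SIZE;
}
</code>
With these two calls, the poll could hand the whole block to a hardware tx fifo or a DMA channel in one go and then skip however many bytes were accepted, instead of popping one byte per loop iteration.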