1
#include <drivers/serial/uart_dev.h>
2
#include <drivers/serial/diag_serial.h>
3
#include <drivers/gpio/gpio.h>
4
#include <drivers/ttys.h>
7
#include "kernel/irq_lock.h"
8
#include "plib035_uart.h"
11
#include "util/ring_buff.h"
12
#include <embox/unit.h>
13
#include <kernel/time/ktime.h>
15
#include <drivers/serial/k1921vk035_io_request.h>
17
extern const struct uart_ops k1921vk035_uart_ops;
18
#include <kernel/printk.h>
20
/**
 * Reset an I/O request back to the idle state.
 *
 * Clears the request type to IO_REQUEST_MODE_NONE and zeroes the
 * per-mode bookkeeping union so a stale buffer pointer / byte count
 * cannot be observed by the IRQ handler afterwards.
 *
 * @param io request descriptor to reset (must be non-NULL)
 */
void io_request_clean(io_request_t *io) {
	io->type = IO_REQUEST_MODE_NONE;
	memset(&io->mode, 0, sizeof(io->mode));
}
25
irq_return_t uart_io_request_handler(unsigned int irq_nr, void *data) {
26
io_request_t* io_req = (io_request_t* )data;
29
while(uart_hasrx(io_req->uart)) {
30
int ch = uart_getc(io_req->uart);
32
switch(io_req->type) {
33
case IO_REQUEST_MODE_NONE:
36
int res = ring_buff_enqueue(io_req->rx_buff, &ch, 1);
42
case IO_REQUEST_MODE_READ:
44
volatile io_mode_read* read = &io_req->mode.read;
48
if(read->count == 0) {
49
io_request_clean(io_req);
51
sem_post(&io_req->semaphore);
55
case IO_REQUEST_MODE_DISCARD:
58
volatile io_mode_discard* discard = &io_req->mode.discard;
60
if(discard->count == 0) {
61
io_request_clean(io_req);
72
/**
 * Blocking read of exactly @p count bytes.
 *
 * Bytes already queued in the RX ring buffer are consumed first; if they
 * do not cover the request, the remainder is armed as an in-IRQ read
 * (IO_REQUEST_MODE_READ) and the caller sleeps on the request semaphore
 * until the interrupt handler completes it.
 *
 * @param io    request descriptor bound to a UART
 * @param buf   destination buffer, at least @p count bytes
 * @param count number of bytes to read
 * @return @p count (the call does not return until all bytes arrived)
 *
 * NOTE(review): the irq_lock()/irq_unlock() bracketing is reconstructed —
 * the original's locking lines were lost in extraction, but the race
 * between ring inspection and arming the IRQ-visible mode fields makes
 * some such critical section necessary; confirm against the original.
 */
int io_request_read(io_request_t *io, char *buf, size_t count) {
	irq_lock();

	int ring_count = ring_buff_get_cnt(io->rx_buff);

	if (ring_count >= (int)count) {
		/* Everything we need is already buffered. */
		for (size_t i = 0; i < count; i++) {
			ring_buff_dequeue(io->rx_buff, buf + i, 1);
		}
		irq_unlock();
		return count;
	}

	/* Drain what is buffered, then hand the tail to the IRQ handler. */
	for (int i = 0; i < ring_count; i++) {
		ring_buff_dequeue(io->rx_buff, buf + i, 1);
	}
	io->type = IO_REQUEST_MODE_READ;
	io->mode.read.buffer = buf + ring_count;
	io->mode.read.count = count - ring_count;

	irq_unlock();

	/* Sleep until uart_io_request_handler() posts completion. */
	sem_wait(&io->semaphore);

	return count;
}
99
/**
 * Scatter read: fill each iovec in order via io_request_read().
 *
 * Blocks until every segment is completely filled (io_request_read()
 * only returns once its full count has arrived).
 *
 * @param io        request descriptor bound to a UART
 * @param iov       array of destination segments
 * @param iov_count number of segments in @p iov
 * @return total number of bytes read (sum of all segment lengths)
 */
int io_request_readv(io_request_t *io, struct iovec *iov, size_t iov_count) {
	int ret = 0;

	for (size_t i = 0; i < iov_count; i++) {
		ret += io_request_read(io, iov[i].iov_base, iov[i].iov_len);
	}

	return ret;
}
107
/**
 * Read up to @p count bytes, waiting at most @p timeout_ms milliseconds.
 *
 * Behaves like io_request_read() but bounds the wait with
 * sem_timedwait(). On timeout the request is cancelled and the number
 * of bytes actually delivered so far is returned, which may be less
 * than @p count.
 *
 * @param io         request descriptor bound to a UART
 * @param buf        destination buffer, at least @p count bytes
 * @param count      number of bytes requested
 * @param timeout_ms wait budget in milliseconds; 0 means non-blocking
 * @return number of bytes stored into @p buf (== count on full success)
 *
 * NOTE(review): branch structure and locking reconstructed — several
 * interior lines (including the non-blocking fast path around the
 * `read_count` drain) were lost in extraction; confirm against the
 * original. Also fixed here: the mis-encoded `&current_time` argument
 * to clock_gettime() and a stray line-continuation backslash.
 */
int io_request_read_timeout(io_request_t *io, char *buf, size_t count,
		int32_t timeout_ms) {
	irq_lock();

	int ring_count = ring_buff_get_cnt(io->rx_buff);

	if (timeout_ms == 0) {
		/* Non-blocking: deliver only what is already buffered. */
		int read_count = ring_count > (int)count ? (int)count : ring_count;
		for (int i = 0; i < read_count; i++) {
			ring_buff_dequeue(io->rx_buff, buf + i, 1);
		}
		irq_unlock();
		return read_count;
	}

	if (ring_count >= (int)count) {
		/* Fully satisfiable from the ring buffer — no wait needed. */
		for (size_t i = 0; i < count; i++) {
			ring_buff_dequeue(io->rx_buff, buf + i, 1);
		}
		irq_unlock();
		return count;
	}

	/* Partial data: drain the ring, arm the IRQ-side read for the rest. */
	for (int i = 0; i < ring_count; i++) {
		ring_buff_dequeue(io->rx_buff, buf + i, 1);
	}
	io->type = IO_REQUEST_MODE_READ;
	io->mode.read.buffer = buf + ring_count;
	io->mode.read.count = count - ring_count;

	irq_unlock();

	/* Absolute deadline = now + timeout_ms. */
	struct timespec current_time = {};
	clock_gettime(CLOCK_REALTIME, &current_time);
	struct timespec wait_time = timespec_add(current_time,
			ns_to_timespec((uint64_t)timeout_ms * 1000000ULL));

	int wait_res = sem_timedwait(&io->semaphore, &wait_time);
	if (wait_res == -ETIMEDOUT) {
		/* Cancel the pending request; report what arrived before the
		 * deadline (count minus the bytes still outstanding). */
		irq_lock();
		int ret = count - io->mode.read.count;
		io_request_clean(io);
		irq_unlock();
		return ret;
	}

	return count;
}
150
/**
 * Scatter read with a per-segment timeout.
 *
 * Fills each iovec in order via io_request_read_timeout(); each segment
 * gets its own @p timeout_ms_per_iov budget. Stops early as soon as one
 * segment comes back short (i.e. that segment timed out), since later
 * segments would time out too.
 *
 * @param io                 request descriptor bound to a UART
 * @param iov                array of destination segments
 * @param iov_count          number of segments in @p iov
 * @param timeout_ms_per_iov wait budget per segment, in milliseconds
 * @return total number of bytes read across all segments
 */
int io_request_readv_timeout(io_request_t *io, struct iovec *iov,
		size_t iov_count, int32_t timeout_ms_per_iov) {
	int ret = 0;

	for (size_t i = 0; i < iov_count; i++) {
		int iov_ret = io_request_read_timeout(io, iov[i].iov_base,
				iov[i].iov_len, timeout_ms_per_iov);

		ret += iov_ret;
		if (iov_ret < (int)iov[i].iov_len) {
			/* Segment timed out short — stop here. */
			break;
		}
	}

	return ret;
}
164
/**
 * Synchronous write: push @p count bytes to the UART one at a time.
 *
 * uart_putc() blocks until the TX path accepts each byte, so on return
 * every byte has been handed to the hardware.
 *
 * @param io    request descriptor bound to a UART
 * @param buf   bytes to transmit
 * @param count number of bytes in @p buf
 * @return @p count
 */
int io_request_write(io_request_t *io, char *buf, size_t count) {
	for (size_t i = 0; i < count; i++) {
		uart_putc(io->uart, buf[i]);
	}

	return count;
}
174
/**
 * Arm the request to silently drop the next @p count received bytes.
 *
 * The IRQ handler decrements the discard count per byte and resets the
 * request once it hits zero. A @p count of 0 is a no-op: arming discard
 * mode with a zero count would otherwise never complete, since the
 * handler only clears the mode when the count *reaches* zero after a
 * decrement.
 *
 * @param io    request descriptor bound to a UART
 * @param count number of upcoming RX bytes to throw away
 */
void io_request_discard_next_bytes(io_request_t *io, size_t count) {
	if (count == 0) {
		return;
	}
	io->type = IO_REQUEST_MODE_DISCARD;
	io->mode.discard.count = count;
}