#include <drivers/serial/uart_dev.h>
#include <drivers/serial/diag_serial.h>
#include <drivers/gpio/gpio.h>
#include <drivers/ttys.h>
#include "K1921VK035.h"
#include "errno.h"
#include "kernel/irq_lock.h"
#include "plib035_uart.h"
#include "string.h"
#include "time.h"
#include "util/ring_buff.h"
#include <embox/unit.h>
#include <kernel/printk.h>
#include <kernel/time/ktime.h>
#include <sys/uio.h>
#include <drivers/serial/k1921vk035_io_request.h>

extern const struct uart_ops k1921vk035_uart_ops;

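/* For reference, the shape of io_request_t as implied by its uses in this
 * file; the authoritative definition lives in
 * <drivers/serial/k1921vk035_io_request.h> and may differ in details. */
#if 0
typedef struct {
    struct uart *uart;           /* underlying UART device */
    struct ring_buff *rx_buff;   /* intermediate RX buffer (IRQ -> reader) */
    sem_t semaphore;             /* wakes a reader blocked in io_request_read() */
    int type;                    /* IO_REQUEST_MODE_{NONE,READ,DISCARD} */
    union {
        io_mode_read read;       /* { char *buffer; size_t count; } */
        io_mode_discard discard; /* { size_t count; } */
    } mode;
} io_request_t;
#endif
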
/* Reset a request to the idle state: the IRQ handler goes back to buffering
 * incoming bytes into the intermediate ring buffer. */
void io_request_clean(io_request_t* io) {
    io->type = IO_REQUEST_MODE_NONE;
    memset(&io->mode, 0, sizeof(io->mode));
}

/* UART RX interrupt handler: drain the FIFO and dispatch each received byte
 * according to the current request mode. */
irq_return_t uart_io_request_handler(unsigned int irq_nr, void *data) {
    io_request_t* io_req = (io_request_t*) data;
    // printk("I %d, RIS: 0x%x, HRX %d\n", irq_nr, ((UART_TypeDef* )io_req->uart->base_addr)->RIS, uart_hasrx(io_req->uart));
    irq_lock();
    while (uart_hasrx(io_req->uart)) {
        int ch = uart_getc(io_req->uart);
        // printk("HRX %d %2.X\n", uart_hasrx(io_req->uart), ch);
        switch (io_req->type) {
            case IO_REQUEST_MODE_NONE:
            {
                /* No read is pending: store the byte in the intermediate buffer */
                int res = ring_buff_enqueue(io_req->rx_buff, &ch, 1);
                if (res == 0) {
                    /* TODO: no one is reading the data out, so the intermediate
                     * buffer has overflowed; what do we do? */
                }
            } break;

            case IO_REQUEST_MODE_READ:
            {
                /* A reader is blocked: copy the byte straight into its buffer */
                volatile io_mode_read* read = &io_req->mode.read;
                read->buffer[0] = ch;
                read->buffer++;
                read->count--;
                if (read->count == 0) {
                    io_request_clean(io_req);
                    /* Notify the thread that was waiting */
                    sem_post(&io_req->semaphore);
                }
            } break;

            case IO_REQUEST_MODE_DISCARD:
            {
                /* Do nothing with the byte */
                volatile io_mode_discard* discard = &io_req->mode.discard;
                discard->count--;
                if (discard->count == 0) {
                    io_request_clean(io_req);
                }
            } break;
        }
    }
    irq_unlock();
    return IRQ_HANDLED;
}

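/* A sketch of how this handler could be wired up at init time. The IRQ
 * number and device name are hypothetical; embox's irq_attach(irq_nr,
 * handler, flags, dev_id, dev_name) signature is assumed, and the ring
 * buffer and semaphore are assumed to be initialized elsewhere. */
#if 0
static int example_io_request_init(io_request_t *io_req, struct uart *uart, int irq_nr) {
    io_req->uart = uart;
    io_request_clean(io_req); /* start idle: RX bytes go to the ring buffer */
    return irq_attach(irq_nr, uart_io_request_handler, 0, io_req, "k1921vk035_uart");
}
#endif
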
/* Blocking read: consume what the intermediate buffer already holds, then
 * sleep until the IRQ handler has delivered the remaining bytes. */
int io_request_read(io_request_t* io, char* buf, size_t count) {
    irq_lock();
    {
        /* Use the buffered bytes first */
        int ring_count = ring_buff_get_cnt(io->rx_buff);
        if (ring_count >= (int) count) {
            /* We can satisfy the call right now */
            for (size_t i = 0; i < count; i++) {
                ring_buff_dequeue(io->rx_buff, buf + i, 1);
            }
            irq_unlock();
            return count;
        } else {
            for (int i = 0; i < ring_count; i++) {
                ring_buff_dequeue(io->rx_buff, buf + i, 1);
            }
            io->type = IO_REQUEST_MODE_READ;
            io->mode.read.buffer = buf + ring_count;
            io->mode.read.count = count - ring_count;
        }
    }

    irq_unlock();
    sem_wait(&io->semaphore);
    return count;
}

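/* Usage sketch (hypothetical caller): read one fixed-size frame, blocking
 * until the IRQ handler has filled the whole buffer. */
#if 0
static void example_read_frame(io_request_t *io) {
    char frame[8];
    int n = io_request_read(io, frame, sizeof(frame)); /* always returns 8 */
    printk("got %d bytes, first 0x%02x\n", n, (unsigned char) frame[0]);
}
#endif
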
/* Scatter read: fill each iovec in turn, blocking as needed. */
int io_request_readv(io_request_t* io, struct iovec* iov, size_t iov_count) {
    int ret = 0;
    for (size_t i = 0; i < iov_count; i++) {
        ret += io_request_read(io, iov[i].iov_base, iov[i].iov_len);
    }
    return ret;
}

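/* Usage sketch: a typical scatter read pulls a header and a payload into
 * separate buffers in one call (names and sizes hypothetical). */
#if 0
static void example_readv(io_request_t *io) {
    char hdr[4], payload[16];
    struct iovec iov[2] = {
        { .iov_base = hdr,     .iov_len = sizeof(hdr)     },
        { .iov_base = payload, .iov_len = sizeof(payload) },
    };
    int n = io_request_readv(io, iov, 2); /* blocks until all 20 bytes arrive */
    (void) n;
}
#endif
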
/* Read with a timeout. A negative timeout_ms means "do not wait": return
 * whatever the intermediate buffer already holds. Otherwise block until
 * either all count bytes have arrived or timeout_ms elapses, returning the
 * number of bytes actually read. */
int io_request_read_timeout(io_request_t* io, char* buf, size_t count, int32_t timeout_ms) {
    irq_lock();
    int ring_count = ring_buff_get_cnt(io->rx_buff);
    int read_count = ring_count > (int) count ? (int) count : ring_count;
    if (timeout_ms < 0) {
        /* No waiting: flush out everything we already have */
        for (int i = 0; i < read_count; i++) {
            ring_buff_dequeue(io->rx_buff, buf + i, 1);
        }
        irq_unlock();
        return read_count;
    } else {
        if (ring_count >= (int) count) {
            /* We can satisfy the call right now */
            for (size_t i = 0; i < count; i++) {
                ring_buff_dequeue(io->rx_buff, buf + i, 1);
            }
            irq_unlock();
            return count;
        } else {
            for (int i = 0; i < ring_count; i++) {
                ring_buff_dequeue(io->rx_buff, buf + i, 1);
            }
            io->type = IO_REQUEST_MODE_READ;
            io->mode.read.buffer = buf + ring_count;
            io->mode.read.count = count - ring_count;
        }
    }

    irq_unlock();
    // sched_wait_timeout(clock_t timeout, clock_t *remain);
    struct timespec current_time = {0};
    clock_gettime(CLOCK_REALTIME, &current_time);
    struct timespec wait_time = timespec_add(current_time, ns_to_timespec((uint64_t) timeout_ms * 1000000));
    int wait_res = sem_timedwait(&io->semaphore, &wait_time);
    if (wait_res == -ETIMEDOUT) {
        /* Partial read: report the bytes delivered before the deadline */
        int ret = count - io->mode.read.count;
        io_request_clean(io);
        return ret;
    }
    return count;
}

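/* Usage sketch: wait up to 100 ms for a reply and handle a short read
 * (caller and sizes hypothetical). */
#if 0
static void example_read_timeout(io_request_t *io) {
    char buf[32];
    int n = io_request_read_timeout(io, buf, sizeof(buf), 100);
    if (n < (int) sizeof(buf)) {
        printk("timed out after %d of %d bytes\n", n, (int) sizeof(buf));
    }
}
#endif
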
/* Scatter read with the timeout applied to each iovec separately; stop at
 * the first iovec that times out and return the total byte count. */
int io_request_readv_timeout(io_request_t* io, struct iovec* iov, size_t iov_count, int32_t timeout_ms_per_iov) {
    int ret = 0;
    for (size_t i = 0; i < iov_count; i++) {
        int iov_ret = io_request_read_timeout(io, iov[i].iov_base, iov[i].iov_len, timeout_ms_per_iov);
        ret += iov_ret;
        if (iov_ret < (int) iov[i].iov_len) {
            /* Timed out: do not start on the next iovec */
            break;
        }
    }

    return ret;
}

/* Blocking write: bytes go out synchronously through uart_putc(), so no TX
 * interrupt is involved. */
int io_request_write(io_request_t* io, char* buf, size_t count) {
    for (size_t i = 0; i < count; i++) {
        uart_putc(io->uart, buf[i]);
    }
    return count;
}

/* NOTE TODO: this should only be invoked while the driver is in
 * IO_REQUEST_MODE_NONE mode. For now there are no checks, because once the
 * driver goes into IO_REQUEST_MODE_READ mode it does not return to the user
 * until it is back in IO_REQUEST_MODE_NONE mode again. */
void io_request_discard_next_bytes(io_request_t* io, size_t count) {
    if (count > 0) {
        io->type = IO_REQUEST_MODE_DISCARD;
        io->mode.discard.count = count;
    }
}

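/* Usage sketch: on a half-duplex link that echoes TX bytes back, arm a
 * discard before writing so the echo never reaches a reader (scenario
 * hypothetical; see the NOTE above about mode restrictions). */
#if 0
static void example_write_with_echo(io_request_t *io, char *cmd, size_t len) {
    io_request_discard_next_bytes(io, len); /* drop the echoed command bytes */
    io_request_write(io, cmd, len);
}
#endif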