#include <string.h>             /* memcpy() */
#include <assert.h>             /* assert() */
#include <errno.h>              /* ENOMEM */

#include <util/log.h>           /* log_debug() */
#include <util/math.h>          /* max() */

#include <net/l3/ipv4/ip_fragment.h>
#include <net/netdevice.h>
#include <net/skbuff.h>
#include <net/l3/icmpv4.h>
#include <net/l3/ipv4/ip.h>

#include <mem/objalloc.h>
#include <kernel/time/timer.h>
#include <kernel/time/time.h>

#include <lib/libds/dlist.h>

#include <framework/mod/options.h>

/* Module options: how many datagrams may be under reassembly at once, and
 * whether fragmented IP traffic is handled at all. */
#define MAX_BUFS_CNT       OPTION_GET(NUMBER, max_uncomplete_cnt)
#define IP_FRAGMENTED_SUPP OPTION_GET(NUMBER, ip_fragmented_support)
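
/* This module implements both directions of RFC 791 IP fragmentation:
 * ip_frag() splits an outgoing packet into MTU-sized fragments, while
 * ip_defrag() collects incoming fragments in per-datagram buffers (keyed by
 * source, destination, protocol and IP id) until the datagram can be
 * rebuilt.  Unfinished buffers are aged out by a periodic timer. */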
/* Reassembly context of one in-flight datagram.  Only the fields used by the
 * code below are listed; their exact types are inferred from that usage. */
struct dgram_buf {
	struct sk_buff_head fragments;      /* fragments received so far */
	struct dlist_head next_buf;         /* link in __dgram_buf_list */

	struct {                            /* RFC 791 datagram identity */
		in_addr_t saddr;
		in_addr_t daddr;
		uint16_t id;
		uint8_t protocol;
	} buf_id;

	int buf_ttl;                        /* reassembly time-to-live */
	int len;                            /* highest payload offset seen so far */
	int meat;                           /* payload bytes actually received */
	int is_deleted;                     /* finished, waiting to be reclaimed */
	int is_last_frag_received;          /* fragment with MF == 0 has arrived */
};
/* All reassembly buffers currently in flight. */
static DLIST_DEFINE(__dgram_buf_list);

/* Periodic timer that ages and reclaims stale reassembly buffers. */
static struct sys_timer ip_frag_timer;

/* Pool the buffers are allocated from, sized by the max_uncomplete_cnt option. */
OBJALLOC_DEF(__dgram_bufs, struct dgram_buf, MAX_BUFS_CNT);

/* "Don't Fragment" bit of an skb's IP header. */
#define df_flag(skb) (ntohs(skb->nh.iph->frag_off) & IP_DF)

/* Aging timer period, in milliseconds. */
#define TIMER_TICK 1000
static struct dgram_buf *ip_buf_create(struct iphdr *iph);
static void buf_delete(struct dgram_buf *buf);
static void ip_buf_add_skb(struct dgram_buf *buf, struct sk_buff *skb);
static struct sk_buff *build_packet(struct dgram_buf *buf);
/* Mark a buffer as finished and drop any fragments it still holds; the
 * struct itself is reclaimed separately by buf_delete(). */
static inline void buf_set_deleted(struct dgram_buf *buf) {
	buf->is_deleted = 1;

	skb_queue_purge(&buf->fragments);
}
/* Byte offset of a fragment within the original datagram: the low 13 bits
 * of frag_off hold the offset counted in 8-byte units. */
static inline int ip_offset(struct sk_buff *skb) {
	int offset;

	offset = ntohs(skb->nh.iph->frag_off);
	offset &= 0x1FFF;       /* strip the IP_DF/IP_MF flag bits */

	return offset << 3;     /* convert 8-byte units to bytes */
}
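
/* Example: a raw frag_off of 0x20B9 has IP_MF set and an offset field of
 * 0x00B9 = 185, so ip_offset() reports 185 * 8 = 1480 bytes. */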
/* Periodic timer callback: walks every reassembly buffer, reclaims the ones
 * already marked deleted and expires the ones whose time-to-live ran out.
 * (The exact actions taken on expiry are an assumption: deleted buffers are
 * returned to the pool, expired ones are dropped.) */
static void ttl_handler(struct sys_timer *timer, void *param) {
	struct dgram_buf *buf = NULL;

	dlist_foreach_entry(buf, &__dgram_buf_list, next_buf) {
		if (buf->is_deleted) {
			buf_delete(buf);
			continue;
		}
		if (buf->buf_ttl == 0) {
			/* reassembly took too long: drop the collected fragments */
			buf_set_deleted(buf);
		} else {
			buf->buf_ttl--;
		}
	}
}
/* Find the reassembly buffer of the datagram this header belongs to.  Per
 * RFC 791 a datagram is identified by (saddr, daddr, protocol, id). */
static inline struct dgram_buf *ip_find(struct iphdr *iph) {
	struct dgram_buf *buf = NULL;

	dlist_foreach_entry(buf, &__dgram_buf_list, next_buf) {
		if (buf->is_deleted) {
			continue;
		}
		if (buf->buf_id.daddr == iph->daddr
				&& buf->buf_id.saddr == iph->saddr
				&& buf->buf_id.protocol == iph->proto
				&& buf->buf_id.id == iph->id) {
			return buf;
		}
	}

	return NULL;
}
/* Account a newly received fragment in its reassembly buffer. */
static void ip_buf_add_skb(struct dgram_buf *buf, struct sk_buff *skb) {
	int offset, data_len, end;

	/* refresh the reassembly time-to-live from the packet's TTL */
	buf->buf_ttl = max(buf->buf_ttl, skb->nh.iph->ttl >> 4);

	offset = ip_offset(skb);

	/* payload carried by this fragment: everything after the IP header */
	data_len = skb->len - (skb->h.raw - skb->mac.raw);
	end = offset + data_len;

	skb_queue_push(&buf->fragments, skb);

	buf->meat += data_len;          /* payload bytes collected so far */
	if (end > buf->len) {
		buf->len = end;         /* highest payload offset seen so far */
	}
}
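
/* Example: a 4000-byte payload arriving as fragments of 1480, 1480 and 1040
 * payload bytes has offsets 0, 1480 and 2960.  meat grows to 4000 and len
 * settles at 2960 + 1040 = 4000; meat == len then tells ip_defrag() that no
 * byte of the datagram is missing. */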
/* Glue the collected fragments back into a single sk_buff.  The fragment at
 * offset 0 supplies the link-layer and IP headers; every fragment's payload
 * is copied to its proper place after those headers. */
static struct sk_buff *build_packet(struct dgram_buf *buf) {
	struct sk_buff *skb, *skb_iter;
	int offset, ihlen;

	skb_iter = skb_queue_front(&buf->fragments);

	/* link-layer header + IP header length, taken from the first fragment */
	ihlen = (skb_iter->h.raw - skb_iter->mac.raw);
	skb = skb_alloc(buf->len + ihlen);
	if (skb == NULL) {
		buf_set_deleted(buf);
		return NULL;
	}

	while ((skb_iter = skb_queue_pop(&buf->fragments))) {
		offset = ip_offset(skb_iter);

		if (offset == 0) {
			/* head fragment: copy it whole, then fix up the header pointers */
			memcpy(skb->mac.raw, skb_iter->mac.raw, skb_iter->len);

			skb->nh.raw = skb->mac.raw + (skb_iter->nh.raw - skb_iter->mac.raw);
			skb->h.raw = skb->nh.raw + IP_HEADER_SIZE(ip_hdr(skb_iter));
			skb->nh.iph->tot_len = htons(buf->len + IP_HEADER_SIZE(ip_hdr(skb_iter)));
			skb->dev = skb_iter->dev;
		}

		/* copy this fragment's payload to its offset in the big packet */
		memcpy(skb->mac.raw + ihlen + offset, skb_iter->mac.raw + ihlen,
				skb_iter->len - ihlen);

		skb_free(skb_iter);
	}

	skb->len = buf->len + ihlen;
	buf_set_deleted(buf);

	return skb;
}
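
/* The reassembled sk_buff looks like a normal, unfragmented packet:
 * [ link-layer header | IP header | payload bytes 0 .. buf->len ), hence
 * skb->len = buf->len + ihlen above. */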
/* Allocate and register a new reassembly buffer for the datagram identified
 * by this IP header. */
static struct dgram_buf *ip_buf_create(struct iphdr *iph) {
	struct dgram_buf *buf;

	buf = (struct dgram_buf *) objalloc(&__dgram_bufs);
	if (buf == NULL) {
		return NULL;
	}

	/* lazily set up the aging timer on first use */
	if (!timer_is_inited(&ip_frag_timer)) {
		timer_init(&ip_frag_timer, TIMER_PERIODIC, ttl_handler, NULL);
		log_debug("timer init");

		timer_start(&ip_frag_timer, ms2jiffies(TIMER_TICK));
	}

	skb_queue_init(&buf->fragments);
	dlist_head_init(&buf->next_buf);
	dlist_add_prev(&buf->next_buf, &__dgram_buf_list);

	/* remember which datagram this buffer belongs to */
	buf->buf_id.protocol = iph->proto;
	buf->buf_id.id = iph->id;
	buf->buf_id.saddr = iph->saddr;
	buf->buf_id.daddr = iph->daddr;

	buf->len = 0;
	buf->meat = 0;
	buf->buf_ttl = 0;
	buf->is_deleted = 0;
	buf->is_last_frag_received = 0;

	return buf;
}
/* Unlink a finished buffer from the global list and return it to the pool. */
static void buf_delete(struct dgram_buf *buf) {
	dlist_del(&buf->next_buf);
	objfree(&__dgram_bufs, (void *)buf);
}
/* Build one fragment of frag_size bytes (headers included) out of big_skb,
 * taking its payload from byte frag_offset of the original frame.  mf_flag
 * is IP_MF for every fragment except the last one. */
static struct sk_buff *ip_frag_build(const struct sk_buff *big_skb, int frag_offset,
		int frag_size, int mf_flag) {
	struct sk_buff *frag;
	int len = big_skb->dev->hdr_len + IP_HEADER_SIZE(big_skb->nh.iph);

	if (unlikely(!(frag = skb_alloc(frag_size)))) {
		return NULL;
	}

	/* copy the link-layer and IP headers ... */
	memcpy(frag->mac.raw, big_skb->mac.raw, len);
	/* ... and the slice of payload that belongs to this fragment */
	memcpy(frag->mac.raw + len, big_skb->mac.raw + frag_offset, frag_size - len);

	frag->nh.raw = frag->mac.raw + big_skb->dev->hdr_len;
	/* the offset field counts 8-byte units of payload, headers excluded */
	frag->nh.iph->frag_off = htons(((frag_offset - len) >> 3) | mf_flag);
	frag->nh.iph->tot_len = htons(frag_size - big_skb->dev->hdr_len);

	return frag;
}
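
/* Example (hypothetical Ethernet numbers): with hdr_len = 14 and a 20-byte IP
 * header, len = 34.  A fragment cut at frag_offset = 1498 gets an offset
 * field of (1498 - 34) / 8 = 183, i.e. it starts 1464 payload bytes into the
 * original datagram. */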
/* Feed one received fragment into the reassembly machinery.  Returns the
 * fully reassembled packet once the last missing piece arrives, or NULL
 * while the datagram is still incomplete or the fragment was dropped.
 * (The drop-and-free behaviour on the early-exit paths is an assumption.) */
struct sk_buff *ip_defrag(struct sk_buff *skb) {
	struct dgram_buf *buf;
	int mf_flag;

	if (!(IP_FRAGMENTED_SUPP) || df_flag(skb)) {
		/* reassembly not supported (or DF set): drop the fragment */
		skb_free(skb);
		skb = (sk_buff_t *)NULL;
		return skb;
	}

	/* look up this datagram's buffer, creating it for the first fragment */
	buf = ip_find(skb->nh.iph);
	if (buf == NULL) {
		buf = ip_buf_create(skb->nh.iph);
		if (buf == NULL) {
			skb_free(skb);
			return NULL;
		}
	}

	ip_buf_add_skb(buf, skb);

	/* a cleared MF bit marks the final fragment of the datagram */
	mf_flag = ntohs(skb->nh.iph->frag_off) & IP_MF;
	buf->is_last_frag_received = !mf_flag;

	/* complete once the last fragment arrived and no byte is missing */
	if (buf->is_last_frag_received && buf->meat == buf->len) {
		return build_packet(buf);
	}

	return NULL;
}
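
/*
 * Usage sketch (hypothetical caller, not part of this file): an input path
 * hands every incoming fragment to ip_defrag() and only delivers the packet
 * it eventually returns:
 *
 *     skb = ip_defrag(skb);
 *     if (skb == NULL) {
 *         return 0;           // still waiting for more fragments (or dropped)
 *     }
 *     // skb now holds the complete datagram, pass it up the stack
 */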
/* Split an outgoing skb into MTU-sized fragments queued on tx_buf.  Returns
 * 0 on success and a negative errno value if a fragment could not be
 * allocated (the exact return convention is an assumption here). */
int ip_frag(const struct sk_buff *skb, uint32_t mtu,
		struct sk_buff_head *tx_buf) {
	struct sk_buff *fragment;
	int offset, len, align_MTU;

	assert(skb->dev != NULL);

	skb_queue_init(tx_buf);
	if (!IP_FRAGMENTED_SUPP) {
		/* fragmentation disabled: pass the packet through unchanged */
		skb_queue_push(tx_buf, (struct sk_buff *) skb);
		return 0;
	}

	/* link-layer + IP header size; every fragment carries its own copy */
	offset = len = skb->dev->hdr_len + IP_HEADER_SIZE(skb->nh.iph);

	/* largest fragment size whose payload is a multiple of 8 bytes */
	align_MTU = mtu - (mtu - len) % 8;

	/* every fragment but the last carries exactly (align_MTU - len) bytes */
	while (offset < skb->len - align_MTU + len) {
		fragment = ip_frag_build(skb, offset, align_MTU, IP_MF);
		if (fragment == NULL) {
			skb_queue_purge(tx_buf);
			return -ENOMEM;
		}
		skb_queue_push(tx_buf, fragment);
		offset += (align_MTU - len);
	}

	/* the last fragment takes whatever payload is left, with MF cleared */
	if (offset < skb->len) {
		fragment = ip_frag_build(skb, offset, skb->len - offset + len, 0);
		if (fragment == NULL) {
			skb_queue_purge(tx_buf);
			return -ENOMEM;
		}
		skb_queue_push(tx_buf, fragment);
	}

	return 0;
}
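
/*
 * Example (hypothetical Ethernet numbers): hdr_len = 14 and a 20-byte IP
 * header give len = 34.  With mtu = 1500, align_MTU = 1500 - (1500 - 34) % 8
 * = 1498, so every fragment but the last carries 1464 payload bytes, keeping
 * fragment offsets on the 8-byte boundary the frag_off field requires.
 *
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *     struct sk_buff_head tx_buf;
 *     struct sk_buff *frag;
 *
 *     if (ip_frag(skb, dev->mtu, &tx_buf) == 0) {
 *         while ((frag = skb_queue_pop(&tx_buf)) != NULL) {
 *             xmit(frag);     // hypothetical transmit routine
 *         }
 *     }
 */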