Subversion Repositories shark

Rev

Rev 262 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
262 giacomo 1
#include <kernel/func.h>
2
#include <kernel/kern.h>
3
#include <stdlib.h>
4
#include <stdio.h>
5
#include "string.h"
6
 
7
#include "drivers/udpip.h"
8
 
9
#include "tftp.h"
10
#include "endn.h"
11
 
12
/*  */
13
#include "modules/sem.h"
14
 
15
/* Local and remote IP addresses as dotted-quad strings; filled in by
 * tftp_net_start() and read when binding sockets in tftp_upload(). */
char local_ip_addr[20];
char host_ip_addr[20];

/* The value is incremented when assigning a new port address to a new
 * connection.
 */
int port_counter;

/* The fixed IP/port (=69) to submit the connection requesting */
UDP_ADDR connection_request;

/* Per-stream connection state, one slot per handle, plus its mutex */
TFTP_MODEL model[MAX_CONCURRENT_STREAM];
sem_t *model_sem[MAX_CONCURRENT_STREAM];

/* Per-stream FIFO data buffer; its mutex is supplied by the caller of
 * tftp_upload() (see the mtx parameter) */
TFTP_BUFFER buffer[MAX_CONCURRENT_STREAM];
//QUEUE queue[MAX_CONCURRENT_STREAM];
sem_t *buffer_sem[MAX_CONCURRENT_STREAM];
32
 
33
/* Copy n payload bytes out of a DATA packet into data and return the
 * packet's block number (raw, no endian conversion here). */
WORD tftp_get_data(TFTP_PACKET *pkt, BYTE *data, int n) {
  memcpy(data, pkt->u.data.data, n);
  return pkt->u.data.block;
}
37
 
38
/* Return the raw block number carried by an ACK packet. */
int tftp_get_ack_block(TFTP_PACKET *pkt) {
  return pkt->u.ack.block;
}
41
 
42
/* Copy an ERROR packet's message into errmsg and return its error code. */
int tftp_get_error(TFTP_PACKET *pkt, char *errmsg) {
  strcpy(errmsg, pkt->u.err.errmsg);
  return pkt->u.err.errcode;
}
46
 
47
/* Returns the packet opcode.
48
 */
49
int tftp_get_opcode(TFTP_PACKET *pkt) {
50
  WORD tmp;
51
  tmp = pkt->opcode;
52
  SWAP_SHORT(tmp);    /* Swap endian!! */
53
  return(tmp);
54
}
55
 
56
/* Build a read/write request packet: the opcode (converted to network byte
 * order) followed by the NUL-terminated filename and transfer-mode strings
 * packed back to back in the filename field.  Always returns 0.
 * Improvement: the original re-evaluated strlen() in every loop iteration
 * (O(n^2)); the lengths are hoisted and the copies done with memcpy.
 * NOTE(review): there is still no bounds check against
 * sizeof(pkt->u.request.filename) — a long filename+mode pair would
 * overflow the field; confirm callers' inputs are short.
 */
int tftp_fill_request(TFTP_PACKET *pkt, WORD opcode, const BYTE *filename, const BYTE *mode) {
  size_t flen = strlen(filename);
  size_t mlen = strlen(mode);

  pkt->opcode = opcode;       /* Put the opcode in the right struct field */
  SWAP_SHORT(pkt->opcode);    /* Swap endian!! */

  /* Reset the filename field */
  memset(pkt->u.request.filename, 0, sizeof(pkt->u.request.filename));

  /* Layout: filename '\0' mode '\0' */
  memcpy(pkt->u.request.filename, filename, flen);
  pkt->u.request.filename[flen] = '\0';
  memcpy(pkt->u.request.filename + flen + 1, mode, mlen);
  pkt->u.request.filename[flen + 1 + mlen] = '\0';

  return 0;
}
75
 
76
/* Build a DATA packet: opcode and block number in network byte order,
 * followed by datasize raw payload bytes.
 * Returns 1 when datasize exceeds TFTP_DATA_SIZE (packet left untouched),
 * 0 on success.
 */
int tftp_fill_data(TFTP_PACKET *pkt, WORD nblock, BYTE *rawdata, WORD datasize) {
  if (datasize > TFTP_DATA_SIZE) { return(1); }   /* Overflow checking */

  pkt->opcode = TFTP_DATA;   /* Put the DATA opcode in the opcode field */
  SWAP_SHORT(pkt->opcode);   /* Swap endian!! */

  pkt->u.data.block = nblock;
  SWAP_SHORT(pkt->u.data.block); /* Swap endian!! */

  /* FIX: tftp_close() calls this with rawdata == NULL and datasize == 0;
   * memcpy from a null pointer is undefined behavior, so skip the copy
   * when there is no payload. */
  if (datasize > 0)
    memcpy(pkt->u.data.data, rawdata, datasize);
  return(0);
}
88
 
89
/* Build an ACK packet for block nblock.  Always returns 0.
 * FIX: the block number is now byte-swapped to network order like the
 * opcode.  tftp_fill_data() swaps u.data.block the same way; the original
 * left u.ack.block in host order, which is inconsistent and wrong on
 * little-endian targets (TFTP uses network byte order throughout).
 */
int tftp_fill_ack(TFTP_PACKET *pkt, WORD nblock) {
  pkt->opcode = TFTP_ACK;    /* Put the ACK opcode in the opcode field */
  SWAP_SHORT(pkt->opcode);   /* Swap endian!! */

  pkt->u.ack.block = nblock;
  SWAP_SHORT(pkt->u.ack.block);  /* Swap endian!! (consistent with tftp_fill_data) */
  return(0);
}
96
 
97
/* Return connection slot h to its pristine state: no connection, no error,
 * no tasks, empty buffer, no semaphores.
 * Does NOT free buffer[h].data or destroy the semaphores — it only NULLs
 * the pointers, so callers must release those resources first.
 */
void tftp_reset_handle(int h) {
	model[h].status = TFTP_NOT_CONNECTED;
  model[h].errcode = TFTP_NO_ERROR;
  model[h].handle = -1;          /* -1 = slot free */
  model[h].sender_pid = -1;      /* no sender task */
  model[h].receiver_pid = -1;    /* no receiver task */
	model[h].nblock = 0;           /* next TFTP block number */
	model[h].waiting_ack = 0;
	model[h].timestamp = 0;
	model[h].timeout = TFTP_DEFAULT_TIMEOUT;
	model[h].ntimeout = TFTP_DEFAULT_TIMEOUT_NUMBER;

	buffer[h].data = NULL;
	buffer[h].size = 0;
	buffer[h].nbytes = 0;

	model_sem[h] = NULL;
	buffer_sem[h] = NULL;
}
116
 
117
int tftp_init() {
118
  int i;
119
 
120
  for (i = 0; i < MAX_CONCURRENT_STREAM; i++) {
121
    tftp_reset_handle(i);
122
  }
123
 
124
  port_counter = 0;
125
 
126
  return(0);
127
}
128
 
129
/* Store the local and remote IP addresses (dotted-quad strings) for later
 * socket setup and, when init_net is non-zero, configure and start the
 * network library with the UDP/IP stack and a dedicated TX task.
 * Returns net_init()'s result when the net is initialized, 0 otherwise.
 */
int tftp_net_start(char *local_ip, char *host_ip, int init_net) {
  struct net_model m = net_base;
  int netval;

  /* Save IPs locally */
  strcpy(local_ip_addr, local_ip);
  strcpy(host_ip_addr, host_ip);

  netval = 0;

  if (init_net) {
    net_setmode(m, TXTASK);                     /* We want a task for TX mutual exclusion */
    net_setudpip(m, local_ip,"255.255.255.255");        /* We use UDP/IP stack */

    /* OK: let's start the NetLib! */
    netval = net_init(&m);
  }

  return(netval);
}
149
 
150
/* Set the retransmission timeout for slot h, given in seconds and stored
 * in microseconds (matching the sys_gettime() comparison in upload_sender).
 * Returns 0 on success, -1 when the guard rejects the call.
 * NOTE(review): the guard compares model[h].handle (a slot index, -1 when
 * free) against the *status* constant TFTP_NOT_CONNECTED — it almost
 * certainly should test model[h].status; confirm intent before relying on
 * this function's result.
 */
int tftp_setup_timeout(int h, int sec) {
  if (model[h].handle != TFTP_NOT_CONNECTED) return(-1);
  model[h].timeout = sec * 1000000;
  return(0);
}
155
 
156
/* Set the number of retransmissions attempted before the stream is marked
 * TFTP_ERR_TIMEOUT (see upload_sender).  Returns 0 on success, -1 when the
 * guard rejects the call.
 * NOTE(review): same suspicious guard as tftp_setup_timeout() — handle (an
 * index) is compared against the status constant TFTP_NOT_CONNECTED;
 * model[h].status looks like the intended operand.
 */
int tftp_set_timeout_numbers(int h, int n) {
  if (model[h].handle != TFTP_NOT_CONNECTED) return(-1);
  model[h].ntimeout = n;
  return(0);
}
161
 
162
/* Open a new TFTP stream for file fname: claim the first free slot
 * (handle == slot index), record the filename and mark it TFTP_OPEN.
 * Returns the handle, or -1 when all MAX_CONCURRENT_STREAM slots are busy.
 * NOTE(review): model_sem[i] is still NULL here (cleared by
 * tftp_reset_handle() and never allocated anywhere visible), so sem_init()
 * receives a NULL sem_t* — verify against the modules/sem.h implementation.
 * NOTE(review): fname length is not checked against model[i].filename.
 */
int tftp_open(char *fname) {
  int i;

  /* Finds the first free connection slot */
  for (i = 0; i < MAX_CONCURRENT_STREAM; i++)
     if (model[i].status == TFTP_NOT_CONNECTED) break;
  if (i >= MAX_CONCURRENT_STREAM) return(-1);   /* No connection slots available */

  model[i].handle = i;                  /* Handle = index in the struct array */
  strcpy(model[i].filename, fname);     /* Save filename into struct */
  model[i].status = TFTP_OPEN;          /* Connection opened */
  sem_init(model_sem[i], 0, 1);

  return(i);
}
177
 
178
/* Periodic soft task driving one upload stream (one instance per slot).
 * Each cycle does exactly one of:
 *  - retransmission handling: when an ACK is pending and the timeout has
 *    elapsed, either resend last_sent (consuming one retry) or, when the
 *    retry budget ntimeout is exhausted, set TFTP_ERR/TFTP_ERR_TIMEOUT;
 *  - a state-machine step: ACTIVE sends the write request and moves to
 *    CONNECTION_REQUESTING; STREAMING sends one full data packet whenever
 *    the FIFO holds at least TFTP_DATA_SIZE bytes; FLUSHING drains the
 *    FIFO and, on the final short packet, kills the receiver task and
 *    aborts itself.
 * model[] fields are accessed under model_sem[id] except those that stay
 * constant for the connection's lifetime (socket, filename, host).
 */
TASK upload_sender(int id) {
  TFTP_PACKET pkt;
  char data[TFTP_DATA_SIZE];
  int mystatus;
  int i, n;

  i = 0;
  while(1) {
    sem_wait(model_sem[id]);

    if (model[id].waiting_ack) {   /* and status != error ??? */
      /* Retransmission path: resend last_sent when the ACK is late */
      if (sys_gettime(NULL) - model[id].timestamp >= model[id].timeout) {   /* ??? check it!!! */
        if (!model[id].ntimeout) {
          /* Retry budget exhausted: mark the stream as failed */
          model[id].status = TFTP_ERR;
          model[id].errcode = TFTP_ERR_TIMEOUT;
          sem_post(model_sem[id]);
        } else {
          model[id].ntimeout--;
          model[id].timestamp = sys_gettime(NULL);
          sem_post(model_sem[id]);
          udp_sendto(model[id].socket, (char*)(&model[id].last_sent), sizeof(TFTP_PACKET), &model[id].host);
        }
      } else {
        sem_post(model_sem[id]);   /* ACK still inside the timeout window */
      }
    } else {
      mystatus = model[id].status;
      sem_post(model_sem[id]);

      switch (mystatus) {
        case TFTP_ACTIVE : {

          /* Doesn't use mutex 'cause uses "static" model fields */
          tftp_fill_request(&pkt, TFTP_WRITE_REQUEST, model[id].filename, TFTP_OCTET_MODE);
          udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &connection_request);
          memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));        /* Save the last sent packet for retransmission */

          sem_wait(model_sem[id]);
          if (model[id].status != TFTP_ERR)
            model[id].status = TFTP_CONNECTION_REQUESTING;
          else {
            sem_post(model_sem[id]);
            break;
          }
          model[id].waiting_ack = 1;
          model[id].timestamp = sys_gettime(NULL);
          sem_post(model_sem[id]);

          break;
        }
        case TFTP_CONNECTION_REQUESTING : {
          /* intentional fallthrough into TFTP_STREAMING */
        }
        case TFTP_STREAMING : {
          if (tftp_usedbuffer(id) >= TFTP_DATA_SIZE) {
            n = tftp_get(id, data, TFTP_DATA_SIZE);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &model[id].host);
            memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));        /* Save the last sent packet for retransmission */

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 1;
            model[id].timestamp = sys_gettime(NULL);
            sem_post(model_sem[id]);
          }
          break;
        }
        case TFTP_FLUSHING : {
          n = tftp_usedbuffer(id);
          if (n >= TFTP_DATA_SIZE) {

            /* Get data for a full data packet */
            n = tftp_get(id, data, TFTP_DATA_SIZE);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &model[id].host);
            memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));        /* Save the last sent packet for retransmission */

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 1;
            model[id].timestamp = sys_gettime(NULL);
            sem_post(model_sem[id]);
          } else {

            /* Get remaining data from buffer */
            n = tftp_get(id, data, n);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            /* Sending 4 extra bytes for opcode and block number!! */
            /* NOTE(review): sizeof(n + 4) evaluates to sizeof(int), NOT
             * n + 4 — the length argument looks wrong; verify. */
            udp_sendto(model[id].socket, (char*)(&pkt), sizeof(n + 4), &model[id].host);

            /* Don't wait for ack!! Maybe will be implemented later... */
            task_kill(model[id].receiver_pid);
            /* ..... */
            task_abort(NULL);
          }
          break;
        }

        case TFTP_ERROR : {
          /* NOTE(review): TFTP_ERROR is the packet opcode constant, while
           * the model's error status is TFTP_ERR — confirm which was meant
           * for this case label. */
          break;
        }
      }
    }

    task_testcancel();
    task_endcycle();
  }
  return(0);
}
288
 
289
/* Non real-time task: blocks on the upload socket and consumes ACK/ERROR
 * packets for stream id.
 *  - an ERROR packet stores its code and message in the model and sets
 *    TFTP_ERR (which also makes this loop stop reading the socket);
 *  - an ACK while CONNECTION_REQUESTING latches the server's reply address
 *    as the transfer destination and moves the stream to TFTP_STREAMING;
 *  - an ACK while STREAMING clears waiting_ack and advances nblock.
 */
TASK upload_receiver(int id) {
  char msg[200];
  int mystatus;
  int n;
  int i;
  WORD opcode;
  TFTP_PACKET pkt;
  UDP_ADDR server;

  i = 0;
  while (1) {
    sem_wait(model_sem[id]);
    mystatus = model[id].status;
    sem_post(model_sem[id]);

    if (mystatus != TFTP_ERR) {
      n = udp_recvfrom(model[id].socket, &pkt, &server);   /* blocking read */
      opcode = tftp_get_opcode(&pkt);

      if (opcode == TFTP_ERROR) {
        n = tftp_get_error(&pkt, msg);   // re-use n: not too orthodox...

        sem_wait(model_sem[id]);
        model[id].status = TFTP_ERR;
        model[id].errcode = n;
        strcpy(model[id].errmsg, msg);
        sem_post(model_sem[id]);

      } else {
        switch (mystatus) {
          case TFTP_NOT_CONNECTED : {
            // discard the packet... set error??
            break;
          }
          case TFTP_CONNECTION_REQUESTING : {
            sem_wait(model_sem[id]);
            /* Remember the server's reply address: all further data
             * packets are sent there rather than to port 69 */
            memcpy(&model[id].host, &server, sizeof(model[id].host));
            model[id].waiting_ack = 0;
            model[id].status = TFTP_STREAMING;
            model[id].nblock++;
            sem_post(model_sem[id]);

            break;
          }
          case TFTP_STREAMING : {
            // check the nblock on the arrived packet

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 0;
            model[id].nblock++;
            sem_post(model_sem[id]);
            break;
          }
        }
      }
    }
    i++;
  }

  return(0);
}
352
 
353
/* Start uploading on slot i: install the caller-supplied buffer mutex,
 * allocate the stream FIFO (at most MAX_BUFFER_SIZE bytes), bind a UDP
 * socket on a fresh local port and spawn the periodic sender (soft task)
 * plus the non real-time ACK receiver.
 * Returns 0 on success, a negative code on failure:
 *  -2 buffer too large, -3 NULL mutex, -4 allocation failure,
 *  -5/-6 task creation failure, -7/-8 task activation failure.
 */
int tftp_upload(int i, unsigned long buffsize, sem_t *mtx) {
  SOFT_TASK_MODEL soft_m;
  NRT_TASK_MODEL nrt_m;

  if ((buffer_sem[i] = mtx) == NULL) return(-3);                        /* ??? check assignment!!! */

  if ((buffer[i].size = buffsize) > MAX_BUFFER_SIZE) return(-2);        /* Buffer size too large */
  if ((buffer[i].data = malloc(buffsize)) == NULL) return(-4);          /* Buffer allocation error */
  buffer[i].nbytes = 0;

  /* Create a socket for transmission */
  ip_str2addr(local_ip_addr, &(model[i].local.s_addr));
  model[i].local.s_port = BASE_PORT + port_counter;     /* Different port for each connection */
  port_counter++;

  ip_str2addr(host_ip_addr, &(connection_request.s_addr));
  connection_request.s_port = 69;   /* It is fixed for the connection request */

  model[i].socket = udp_bind(&model[i].local, NULL);

  /* First we set the sender's task properties... */
  soft_task_default_model(soft_m);
  soft_task_def_level(soft_m, 0);
  soft_task_def_arg(soft_m, (void *)(i));
  soft_task_def_group(soft_m, i);
  soft_task_def_periodic(soft_m);
  soft_task_def_wcet(soft_m, TFTP_UPLOAD_SENDER_WCET);
  soft_task_def_period(soft_m, TFTP_UPLOAD_SENDER_PERIOD);
  soft_task_def_met(soft_m, TFTP_UPLOAD_SENDER_MET);

  model[i].sender_pid = task_create("upload_sender", upload_sender, &soft_m, NULL);

  if (model[i].sender_pid == -1) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-5);
  }

  nrt_task_default_model(nrt_m);        /* Start the receiver task... */
  nrt_task_def_arg(nrt_m, (void *)(i));
  if ((model[i].receiver_pid = task_create("upload_receiver", upload_receiver, &nrt_m, NULL)) == NIL) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-6);
  }

  model[i].status = TFTP_ACTIVE;        /* Connection active */
  if (task_activate(model[i].sender_pid) == -1) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-7);
  }
  if (task_activate(model[i].receiver_pid) == -1) {
     free(buffer[i].data);   // Maybe not correct... sys_panic() may be better
     tftp_reset_handle(i);
     return(-8);
  }

  return(0);
}
413
 
414
/* Download is not implemented yet; accepts the same arguments as
 * tftp_upload() and simply reports success. */
int tftp_download(int i, unsigned long buffsize, sem_t *mtx) {
  return 0;
}
417
 
418
int tftp_close(int h, int hardness) {
419
  TFTP_PACKET pkt;
420
 
421
        if (hardness == TFTP_STOP_NOW) {
422
    task_kill(model[h].sender_pid);
423
        task_kill(model[h].receiver_pid);
424
    tftp_fill_data(&pkt, model[h].nblock, NULL, 0);
425
    udp_sendto(model[h].socket, (char*)(&pkt), 4, &model[h].host);
426
    tftp_reset_handle(h);
427
    free(buffer[h].data);
428
    sem_destroy(buffer_sem[h]);
429
    sem_destroy(model_sem[h]);
430
  } else {
431
        sem_wait(model_sem[h]);
432
    model[h].status = TFTP_FLUSHING;
433
        sem_post(model_sem[h]);
434
  }
435
 
436
        return(0);
437
}
438
 
439
/* Append n raw bytes to stream h's FIFO buffer.
 * Returns 0 on success, 1 when the data would not fit. */
int tftp_put(int h, BYTE *rawdata, WORD n) {
  int rc = 0;

  sem_wait(buffer_sem[h]);

  if (buffer[h].nbytes + n > buffer[h].size) {
    rc = 1;                                 /* would overflow the buffer */
  } else {
    memcpy(buffer[h].data + buffer[h].nbytes, rawdata, n);
    buffer[h].nbytes += n;
  }

  sem_post(buffer_sem[h]);

  return rc;
}
456
 
457
/* Extract up to n bytes from stream h's FIFO buffer into rawdata.
 * Returns the number of bytes actually delivered (0 when the buffer is
 * empty).
 * Fixes vs. original:
 *  - the empty-buffer early return happened while still holding
 *    buffer_sem[h], leaking the mutex and deadlocking later callers;
 *  - the compaction used memcpy on overlapping regions and moved only n
 *    bytes of the remainder; it must move the remaining nbytes - n bytes,
 *    and with memmove since source and destination overlap.
 */
int tftp_get(int h, BYTE *rawdata, WORD n) {
  sem_wait(buffer_sem[h]);

  if (buffer[h].nbytes < 1) {
    sem_post(buffer_sem[h]);   /* was missing: early return kept the mutex */
    return(0);
  }
  if (buffer[h].nbytes < n) n = buffer[h].nbytes;

  memcpy(rawdata, buffer[h].data, n);           /* Export data to calling function */
  memmove(buffer[h].data, buffer[h].data + n,   /* Compact the FIFO */
          buffer[h].nbytes - n);
  buffer[h].nbytes -= n;

  sem_post(buffer_sem[h]);
  return(n);
}
472
 
473
int tftp_getbuffersize(int h) {
474
        return(buffer[h].size);         /* We on't use the mutex 'cause the size is read-only */
475
}
476
 
477
int tftp_usedbuffer(int h) {
478
        int n;
479
 
480
//  cprintf("used mutex %d - use %d\n", buffer_sem[h]->mutexlevel, buffer_sem[h]->use);
481
        sem_wait(buffer_sem[h]);
482
        n = buffer[h].nbytes;
483
        sem_post(buffer_sem[h]);
484
        return(n);
485
}
486
 
487
int tftp_freebuffer(int h) {
488
        int n;
489
 
490
        sem_wait(buffer_sem[h]);
491
        n = buffer[h].size - buffer[h].nbytes;
492
        sem_post(buffer_sem[h]);
493
        return(n);
494
}
495
 
496
int tftp_status(int h) {
497
        int n;
498
 
499
        sem_wait(model_sem[h]);
500
        n = model[h].status;
501
        sem_post(model_sem[h]);
502
        return(n);
503
}
504
 
505
///////////////////////////////////////////////////////////////////////////
506
 
507
int debug_setbuffer(int h, int size) {
508
  if ((buffer[h].data = malloc(size)) == NULL) return(-1);              /* Buffer allocation error */
509
  buffer[h].size = size;
510
  buffer[h].nbytes = 0;
511
  return(0);
512
}
513
 
514
void debug_freebuffer(int h) {
515
  free(buffer[h].data);         /* Buffer allocation error */
516
  buffer[h].size = 0;
517
  buffer[h].nbytes = 0;
518
}
519