/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <kernel/func.h>
#include <kernel/kern.h>
#include <stdlib.h>
#include <stdio.h>
#include "string.h"

#include "drivers/udpip.h"

#include "tftp.h"
#include "endn.h"

#include "sem/sem/sem.h"

char local_ip_addr[20];
char host_ip_addr[20];

/* Incremented when assigning a new port address to a new connection. */
int port_counter;

/* The fixed IP/port (=69) to which connection requests are submitted */
UDP_ADDR connection_request;

TFTP_MODEL model[MAX_CONCURRENT_STREAM];
sem_t *model_sem[MAX_CONCURRENT_STREAM];

TFTP_BUFFER buffer[MAX_CONCURRENT_STREAM];
//QUEUE queue[MAX_CONCURRENT_STREAM];
sem_t *buffer_sem[MAX_CONCURRENT_STREAM];

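/*
 * Typical usage of the upload API (illustrative sketch only, not part of the
 * original sources: the IP addresses, buffer size and file name below are
 * placeholders):
 *
 *   sem_t mtx;
 *   int h;
 *
 *   tftp_init();                                      // reset all connection slots
 *   tftp_net_start("192.168.0.2", "192.168.0.1", 1);  // local IP, server IP, init the net lib
 *   h = tftp_open("dump.bin");                        // reserve a connection slot
 *   sem_init(&mtx, 0, 1);                             // mutex protecting the staging buffer
 *   tftp_upload(h, 4096, &mtx);                       // create and activate sender/receiver tasks
 *   ...
 *   tftp_put(h, rawdata, n);                          // queue raw data; the sender task drains it
 *   ...
 *   tftp_close(h, TFTP_STOP_NOW);                     // or close gracefully to flush pending data
 */
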
WORD tftp_get_data(TFTP_PACKET *pkt, BYTE *data, int n) {
  memcpy(data, pkt->u.data.data, n);
  return(pkt->u.data.block);
}

int tftp_get_ack_block(TFTP_PACKET *pkt) {
  return(pkt->u.ack.block);
}

int tftp_get_error(TFTP_PACKET *pkt, char *errmsg) {
  strcpy(errmsg, pkt->u.err.errmsg);
  return(pkt->u.err.errcode);
}

/* Returns the packet opcode. */
int tftp_get_opcode(TFTP_PACKET *pkt) {
  WORD tmp;
  tmp = pkt->opcode;
  SWAP_SHORT(tmp);    /* Swap endianness */
  return(tmp);
}

int tftp_fill_request(TFTP_PACKET *pkt, WORD opcode, const BYTE *filename, const BYTE *mode) {
  int i, j;

  pkt->opcode = opcode;       /* Put the opcode in the right struct field */
  SWAP_SHORT(pkt->opcode);    /* Swap endianness */

  /* Reset the filename field */
  memset(pkt->u.request.filename, 0, sizeof(pkt->u.request.filename));

  /* Concatenate the NULL-terminated filename and mode strings in the filename field */
  for (i = 0; i < strlen(filename); i++)
     pkt->u.request.filename[i] = filename[i];
  pkt->u.request.filename[i] = '\0';
  for (j = 0, i = i + 1; j < strlen(mode); i++, j++)
     pkt->u.request.filename[i] = mode[j];
  pkt->u.request.filename[i] = '\0';

  return(0);
}

int tftp_fill_data(TFTP_PACKET *pkt, WORD nblock, BYTE *rawdata, WORD datasize) {
  if (datasize > TFTP_DATA_SIZE) { return(1); }   /* Overflow check */

  pkt->opcode = TFTP_DATA;    /* Put the DATA opcode in the opcode field */
  SWAP_SHORT(pkt->opcode);    /* Swap endianness */

  pkt->u.data.block = nblock;
  SWAP_SHORT(pkt->u.data.block);  /* Swap endianness */

  memcpy(pkt->u.data.data, rawdata, datasize);  /* ??? Maybe some data manipulation required!!! */
  return(0);
}

int tftp_fill_ack(TFTP_PACKET *pkt, WORD nblock) {
  pkt->opcode = TFTP_ACK;     /* Put the ACK opcode in the opcode field */
  SWAP_SHORT(pkt->opcode);    /* Swap endianness */

  pkt->u.ack.block = nblock;
  return(0);
}

void tftp_reset_handle(int h) {
  model[h].status = TFTP_NOT_CONNECTED;
  model[h].errcode = TFTP_NO_ERROR;
  model[h].handle = -1;
  model[h].sender_pid = -1;
  model[h].receiver_pid = -1;
  model[h].nblock = 0;
  model[h].waiting_ack = 0;
  model[h].timestamp = 0;
  model[h].timeout = TFTP_DEFAULT_TIMEOUT;
  model[h].ntimeout = TFTP_DEFAULT_TIMEOUT_NUMBER;

  buffer[h].data = NULL;
  buffer[h].size = 0;
  buffer[h].nbytes = 0;

  model_sem[h] = NULL;
  buffer_sem[h] = NULL;
}

int tftp_init() {
  int i;

  for (i = 0; i < MAX_CONCURRENT_STREAM; i++) {
    tftp_reset_handle(i);
  }

  port_counter = 0;

  return(0);
}

int tftp_net_start(char *local_ip, char *host_ip, int init_net) {
  struct net_model m = net_base;
  int netval;

  /* Save the IPs locally */
  strcpy(local_ip_addr, local_ip);
  strcpy(host_ip_addr, host_ip);

  netval = 0;

  if (init_net) {
    net_setmode(m, TXTASK);                       /* We want a task for TX mutual exclusion */
    net_setudpip(m, local_ip, "255.255.255.255"); /* We use the UDP/IP stack */

    /* OK: let's start the NetLib! */
    netval = net_init(&m);
  }

  return(netval);
}

int tftp_setup_timeout(int h, int sec) {
  if (model[h].status != TFTP_NOT_CONNECTED) return(-1);  /* Only allowed before connecting */
  model[h].timeout = sec * 1000000;
  return(0);
}

int tftp_set_timeout_numbers(int h, int n) {
  if (model[h].status != TFTP_NOT_CONNECTED) return(-1);  /* Only allowed before connecting */
  model[h].ntimeout = n;
  return(0);
}

int tftp_open(char *fname) {
  int i;

  /* Find the first free connection slot */
  for (i = 0; i < MAX_CONCURRENT_STREAM; i++)
     if (model[i].status == TFTP_NOT_CONNECTED) break;
  if (i >= MAX_CONCURRENT_STREAM) return(-1);   /* No connection slots available */

  model[i].handle = i;                  /* Handle = index in the struct array */
  strcpy(model[i].filename, fname);     /* Save the filename into the struct */
  model[i].status = TFTP_OPEN;          /* Connection opened */
  sem_init(model_sem[i], 0, 1);

  return(i);
}

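/*
 * upload_sender is a periodic soft task implementing the client-side state
 * machine of a TFTP write transfer (summary added for clarity, derived from
 * the code below):
 *
 *   TFTP_ACTIVE                 send the write request to port 69, then wait for the ack
 *   TFTP_CONNECTION_REQUESTING /
 *   TFTP_STREAMING              when a full TFTP_DATA_SIZE block is buffered, send it
 *   TFTP_FLUSHING               drain the remaining bytes, send the last (short) packet
 *                               and terminate both tasks
 *
 * While waiting_ack is set, the last packet is retransmitted on timeout, up
 * to ntimeout times; after that the connection is put in the TFTP_ERR state
 * with errcode TFTP_ERR_TIMEOUT.
 */
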
TASK upload_sender(int id) {
  TFTP_PACKET pkt;
  char data[TFTP_DATA_SIZE];
  int mystatus;
  int i, n;

  i = 0;
  while (1) {
    sem_wait(model_sem[id]);

    if (model[id].waiting_ack) {   /* and status != error ??? */
      if (sys_gettime(NULL) - model[id].timestamp >= model[id].timeout) {   /* ??? check it!!! */
        if (!model[id].ntimeout) {
          /* Retransmission attempts exhausted: flag a timeout error */
          model[id].status = TFTP_ERR;
          model[id].errcode = TFTP_ERR_TIMEOUT;
          sem_post(model_sem[id]);
        } else {
          /* Retransmit the last packet and restart the timeout */
          model[id].ntimeout--;
          model[id].timestamp = sys_gettime(NULL);
          sem_post(model_sem[id]);
          udp_sendto(model[id].socket, (char*)(&model[id].last_sent), sizeof(TFTP_PACKET), &model[id].host);
        }
      } else {
        sem_post(model_sem[id]);
      }
    } else {
      mystatus = model[id].status;
      sem_post(model_sem[id]);

      switch (mystatus) {
        case TFTP_ACTIVE : {

          /* No mutex needed here: only "static" model fields are used */
          tftp_fill_request(&pkt, TFTP_WRITE_REQUEST, model[id].filename, TFTP_OCTET_MODE);
          udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &connection_request);
          memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));  /* Save the last sent packet for retransmission */

          sem_wait(model_sem[id]);
          if (model[id].status != TFTP_ERR)
            model[id].status = TFTP_CONNECTION_REQUESTING;
          else {
            sem_post(model_sem[id]);
            break;
          }
          model[id].waiting_ack = 1;
          model[id].timestamp = sys_gettime(NULL);
          sem_post(model_sem[id]);

          break;
        }
        case TFTP_CONNECTION_REQUESTING : {
          /* Falls through to the streaming case */
        }
        case TFTP_STREAMING : {
          if (tftp_usedbuffer(id) >= TFTP_DATA_SIZE) {
            n = tftp_get(id, data, TFTP_DATA_SIZE);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &model[id].host);
            memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));  /* Save the last sent packet for retransmission */

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 1;
            model[id].timestamp = sys_gettime(NULL);
            sem_post(model_sem[id]);
          }
          break;
        }
        case TFTP_FLUSHING : {
          n = tftp_usedbuffer(id);
          if (n >= TFTP_DATA_SIZE) {

            /* Get data for a full data packet */
            n = tftp_get(id, data, TFTP_DATA_SIZE);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            udp_sendto(model[id].socket, (char*)(&pkt), sizeof(TFTP_PACKET), &model[id].host);
            memcpy(&model[id].last_sent, &pkt, sizeof(TFTP_PACKET));  /* Save the last sent packet for retransmission */

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 1;
            model[id].timestamp = sys_gettime(NULL);
            sem_post(model_sem[id]);
          } else {

            /* Get the remaining data from the buffer */
            n = tftp_get(id, data, n);
            tftp_fill_data(&pkt, model[id].nblock, data, n);

            /* Send 4 extra bytes for the opcode and block number */
            udp_sendto(model[id].socket, (char*)(&pkt), n + 4, &model[id].host);

            /* Don't wait for the ack. Maybe this will be implemented later... */
            task_kill(model[id].receiver_pid);
            /* ..... */
            task_abort(NULL);
          }
          break;
        }

        case TFTP_ERROR : {
          break;
        }
      }
    }

    task_testcancel();
    task_endcycle();
  }
  return(0);
}

/* This non-real-time task reads UDP packets carrying ACKs from the network.
 */
TASK upload_receiver(int id) {
  char msg[200];
  int mystatus;
  int n;
  int i;
  WORD opcode;
  TFTP_PACKET pkt;
  UDP_ADDR server;

  i = 0;
  while (1) {
    sem_wait(model_sem[id]);
    mystatus = model[id].status;
    sem_post(model_sem[id]);

    if (mystatus != TFTP_ERR) {
      n = udp_recvfrom(model[id].socket, &pkt, &server);
      opcode = tftp_get_opcode(&pkt);

      if (opcode == TFTP_ERROR) {
        n = tftp_get_error(&pkt, msg);   // re-use n: not too orthodox...

        sem_wait(model_sem[id]);
        model[id].status = TFTP_ERR;
        model[id].errcode = n;
        strcpy(model[id].errmsg, msg);
        sem_post(model_sem[id]);

      } else {
        switch (mystatus) {
          case TFTP_NOT_CONNECTED : {
            // discard the packet... set an error??
            break;
          }
          case TFTP_CONNECTION_REQUESTING : {
            // The first ack comes from the server's transfer port: save it as the peer address
            sem_wait(model_sem[id]);
            memcpy(&model[id].host, &server, sizeof(model[id].host));
            model[id].waiting_ack = 0;
            model[id].status = TFTP_STREAMING;
            model[id].nblock++;
            sem_post(model_sem[id]);

            break;
          }
          case TFTP_STREAMING : {
            // TODO: check the nblock on the arrived packet

            sem_wait(model_sem[id]);
            model[id].waiting_ack = 0;
            model[id].nblock++;
            sem_post(model_sem[id]);
            break;
          }
        }
      }
    }
    i++;
  }

  return(0);
}

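/*
 * tftp_upload() starts the upload of the stream associated with handle i,
 * using a staging buffer of buffsize bytes protected by the user-supplied
 * mutex mtx (summary added for clarity, derived from the code below):
 *
 *   returns  0  on success
 *           -2  buffsize larger than MAX_BUFFER_SIZE
 *           -3  mtx is NULL
 *           -4  buffer allocation failed
 *           -5  sender task creation failed
 *           -6  receiver task creation failed
 *           -7  sender task activation failed
 *           -8  receiver task activation failed
 */
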
int tftp_upload(int i, unsigned long buffsize, sem_t *mtx) {
  SOFT_TASK_MODEL soft_m;
  NRT_TASK_MODEL nrt_m;

  if ((buffer_sem[i] = mtx) == NULL) return(-3);                  /* ??? check assignment!!! */

  if ((buffer[i].size = buffsize) > MAX_BUFFER_SIZE) return(-2);  /* Buffer size too large */
  if ((buffer[i].data = malloc(buffsize)) == NULL) return(-4);    /* Buffer allocation error */
  buffer[i].nbytes = 0;

  /* Create a socket for transmission */
  ip_str2addr(local_ip_addr, &(model[i].local.s_addr));
  model[i].local.s_port = BASE_PORT + port_counter;   /* A different port for each connection */
  port_counter++;

  ip_str2addr(host_ip_addr, &(connection_request.s_addr));
  connection_request.s_port = 69;   /* Fixed port for the connection request */

  model[i].socket = udp_bind(&model[i].local, NULL);

  /* First we set the sender task's properties... */
  soft_task_default_model(soft_m);
  soft_task_def_level(soft_m, 0);
  soft_task_def_arg(soft_m, (void *)(i));
  soft_task_def_group(soft_m, i);
  soft_task_def_periodic(soft_m);
  soft_task_def_wcet(soft_m, TFTP_UPLOAD_SENDER_WCET);
  soft_task_def_period(soft_m, TFTP_UPLOAD_SENDER_PERIOD);
  soft_task_def_met(soft_m, TFTP_UPLOAD_SENDER_MET);

  model[i].sender_pid = task_create("upload_sender", upload_sender, &soft_m, NULL);

  if (model[i].sender_pid == -1) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-5);
  }

  nrt_task_default_model(nrt_m);        /* Start the receiver task... */
  nrt_task_def_arg(nrt_m, (void *)(i));
  if ((model[i].receiver_pid = task_create("upload_receiver", upload_receiver, &nrt_m, NULL)) == NIL) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-6);
  }

  model[i].status = TFTP_ACTIVE;        /* Connection active */
  if (task_activate(model[i].sender_pid) == -1) {
     free(buffer[i].data);
     tftp_reset_handle(i);
     return(-7);
  }
  if (task_activate(model[i].receiver_pid) == -1) {
     free(buffer[i].data);   /* Maybe not correct... sys_panic() may be better */
     tftp_reset_handle(i);
     return(-8);
  }

  return(0);
}

int tftp_download(int i, unsigned long buffsize, sem_t *mtx) {
  /* Download is not implemented yet */
  return(0);
}

int tftp_close(int h, int hardness) {
  TFTP_PACKET pkt;

  if (hardness == TFTP_STOP_NOW) {
    /* Abort immediately: kill both tasks and send an empty (final) data packet */
    task_kill(model[h].sender_pid);
    task_kill(model[h].receiver_pid);
    tftp_fill_data(&pkt, model[h].nblock, NULL, 0);
    udp_sendto(model[h].socket, (char*)(&pkt), 4, &model[h].host);
    free(buffer[h].data);            /* Release the resources before resetting the handle */
    sem_destroy(buffer_sem[h]);
    sem_destroy(model_sem[h]);
    tftp_reset_handle(h);
  } else {
    /* Graceful close: let the sender task flush the buffered data */
    sem_wait(model_sem[h]);
    model[h].status = TFTP_FLUSHING;
    sem_post(model_sem[h]);
  }

  return(0);
}

int tftp_put(int h, BYTE *rawdata, WORD n) {
  sem_wait(buffer_sem[h]);

  /* Buffer overflow check */
  if (buffer[h].nbytes + n > buffer[h].size) {
    sem_post(buffer_sem[h]);
    return(1);
  }

  /* Append the new data at the end of the buffer */
  memcpy(buffer[h].data + buffer[h].nbytes, rawdata, n);
  buffer[h].nbytes += n;

  sem_post(buffer_sem[h]);

  return(0);
}

int tftp_get(int h, BYTE *rawdata, WORD n) {
//  cprintf("get mutex %d - use %d\n", buffer_sem[h]->mutexlevel, buffer_sem[h]->use);
  sem_wait(buffer_sem[h]);

  if (buffer[h].nbytes < 1) {
    sem_post(buffer_sem[h]);   /* Release the mutex also on the empty-buffer path */
    return(0);
  }
  if (buffer[h].nbytes < n) n = buffer[h].nbytes;

  memcpy(rawdata, buffer[h].data, n);   /* Export the data to the calling function */
  memmove(buffer[h].data, buffer[h].data + n, buffer[h].nbytes - n);  /* Shift the remaining data to the head of the buffer */
  buffer[h].nbytes -= n;

  sem_post(buffer_sem[h]);
  return(n);
}

int tftp_getbuffersize(int h) {
  return(buffer[h].size);   /* We don't use the mutex because the size is read-only */
}

int tftp_usedbuffer(int h) {
  int n;

//  cprintf("used mutex %d - use %d\n", buffer_sem[h]->mutexlevel, buffer_sem[h]->use);
  sem_wait(buffer_sem[h]);
  n = buffer[h].nbytes;
  sem_post(buffer_sem[h]);
  return(n);
}

int tftp_freebuffer(int h) {
  int n;

  sem_wait(buffer_sem[h]);
  n = buffer[h].size - buffer[h].nbytes;
  sem_post(buffer_sem[h]);
  return(n);
}

int tftp_status(int h) {
  int n;

  sem_wait(model_sem[h]);
  n = model[h].status;
  sem_post(model_sem[h]);
  return(n);
}

///////////////////////////////////////////////////////////////////////////

int debug_setbuffer(int h, int size) {
  if ((buffer[h].data = malloc(size)) == NULL) return(-1);   /* Buffer allocation error */
  buffer[h].size = size;
  buffer[h].nbytes = 0;
  return(0);
}

void debug_freebuffer(int h) {
  free(buffer[h].data);   /* Release the debug buffer */
  buffer[h].size = 0;
  buffer[h].nbytes = 0;
}