1
|
/*
|
2
|
* Buffering of output and input.
|
3
|
* Copyright (C) 1998 Kunihiro Ishiguro
|
4
|
*
|
5
|
* This file is part of GNU Zebra.
|
6
|
*
|
7
|
* GNU Zebra is free software; you can redistribute it and/or modify
|
8
|
* it under the terms of the GNU General Public License as published
|
9
|
* by the Free Software Foundation; either version 2, or (at your
|
10
|
* option) any later version.
|
11
|
*
|
12
|
* GNU Zebra is distributed in the hope that it will be useful, but
|
13
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
14
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
15
|
* General Public License for more details.
|
16
|
*
|
17
|
* You should have received a copy of the GNU General Public License
|
18
|
* along with GNU Zebra; see the file COPYING. If not, write to the
|
19
|
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
|
20
|
* Boston, MA 02111-1307, USA.
|
21
|
*/
|
22
|
|
23
|
#include <stdio.h>
|
24
|
#include <stdlib.h>
|
25
|
#include <unistd.h>
|
26
|
#include <string.h>
|
27
|
#include <errno.h>
|
28
|
#include <stddef.h>
|
29
|
#include <sys/uio.h>
|
30
|
|
31
|
#include "buffer.h"
|
32
|
#include "vty.h"
|
33
|
|
34
|
/* Buffer master: a singly-linked list of buffer_data chunks.  New data
   is appended at the tail; flushed data is consumed from the head. */
struct buffer {
	/* Data list.  head == NULL means the buffer is empty. */
	struct buffer_data *head;
	struct buffer_data *tail;

	/* Size of each buffer_data chunk (fixed at buffer_new time). */
	size_t size;
};
|
43
|
|
44
|
/* Data container: one fixed-size chunk in a buffer's list. */
struct buffer_data {
	struct buffer_data *next;

	/* Location to add new data (write offset into data[]). */
	size_t cp;

	/* Pointer to data not yet flushed (read offset into data[]). */
	size_t sp;

	/* Actual data stream (variable length).  NOTE(review): [0] is the
	   GNU zero-length-array idiom predating C99 flexible array
	   members; allocation size is computed in buffer_add(). */
	unsigned char data[0];	/* real dimension is buffer->size */
};
|
57
|
|
58
|
/* It should always be true that: 0 <= sp <= cp <= size */
|
59
|
|
60
|
/* Default buffer size (used if none specified). It is rounded up to the
   next page boundary. */
|
62
|
#define BUFFER_SIZE_DEFAULT 4096
|
63
|
|
64
|
#define BUFFER_DATA_FREE(D) free((D))
|
65
|
|
66
|
/* Make new buffer. */
|
67
|
struct buffer *buffer_new(size_t size)
|
68
|
{
|
69
|
struct buffer *b;
|
70
|
|
71
|
b = calloc(1, sizeof(struct buffer));
|
72
|
|
73
|
if (size)
|
74
|
b->size = size;
|
75
|
else {
|
76
|
static size_t default_size;
|
77
|
if (!default_size) {
|
78
|
long pgsz = sysconf(_SC_PAGESIZE);
|
79
|
default_size =
|
80
|
((((BUFFER_SIZE_DEFAULT - 1) / pgsz) + 1) * pgsz);
|
81
|
}
|
82
|
b->size = default_size;
|
83
|
}
|
84
|
|
85
|
return b;
|
86
|
}
|
87
|
|
88
|
/* Free a buffer and all of its data chunks.  A NULL argument is a
   no-op (previously buffer_reset would dereference it). */
void buffer_free(struct buffer *b)
{
	if (!b)
		return;
	buffer_reset(b);
	free(b);
}
|
94
|
|
95
|
/* Make string clone. */
|
96
|
char *buffer_getstr(struct buffer *b)
|
97
|
{
|
98
|
size_t totlen = 0;
|
99
|
struct buffer_data *data;
|
100
|
char *s;
|
101
|
char *p;
|
102
|
|
103
|
for (data = b->head; data; data = data->next)
|
104
|
totlen += data->cp - data->sp;
|
105
|
if (!(s = malloc(totlen + 1)))
|
106
|
return NULL;
|
107
|
p = s;
|
108
|
for (data = b->head; data; data = data->next) {
|
109
|
memcpy(p, data->data + data->sp, data->cp - data->sp);
|
110
|
p += data->cp - data->sp;
|
111
|
}
|
112
|
*p = '\0';
|
113
|
return s;
|
114
|
}
|
115
|
|
116
|
/* Return 1 if buffer is empty. */
|
117
|
int buffer_empty(struct buffer *b)
|
118
|
{
|
119
|
return (b->head == NULL);
|
120
|
}
|
121
|
|
122
|
/* Clear and free all allocated data. */
|
123
|
void buffer_reset(struct buffer *b)
|
124
|
{
|
125
|
struct buffer_data *data;
|
126
|
struct buffer_data *next;
|
127
|
|
128
|
for (data = b->head; data; data = next) {
|
129
|
next = data->next;
|
130
|
BUFFER_DATA_FREE(data);
|
131
|
}
|
132
|
b->head = b->tail = NULL;
|
133
|
}
|
134
|
|
135
|
/* Add buffer_data to the end of buffer. */
|
136
|
static struct buffer_data *buffer_add(struct buffer *b)
|
137
|
{
|
138
|
struct buffer_data *d;
|
139
|
|
140
|
d = malloc(offsetof(struct buffer_data, data[b->size]));
|
141
|
if (!d)
|
142
|
return NULL;
|
143
|
d->cp = d->sp = 0;
|
144
|
d->next = NULL;
|
145
|
|
146
|
if (b->tail)
|
147
|
b->tail->next = d;
|
148
|
else
|
149
|
b->head = d;
|
150
|
b->tail = d;
|
151
|
|
152
|
return d;
|
153
|
}
|
154
|
|
155
|
/* Write data to buffer. */
|
156
|
void buffer_put(struct buffer *b, const void *p, size_t size)
|
157
|
{
|
158
|
struct buffer_data *data = b->tail;
|
159
|
const char *ptr = p;
|
160
|
|
161
|
/* We use even last one byte of data buffer. */
|
162
|
while (size) {
|
163
|
size_t chunk;
|
164
|
|
165
|
/* If there is no data buffer add it. */
|
166
|
if (data == NULL || data->cp == b->size)
|
167
|
data = buffer_add(b);
|
168
|
|
169
|
chunk =
|
170
|
((size <=
|
171
|
(b->size - data->cp)) ? size : (b->size - data->cp));
|
172
|
memcpy((data->data + data->cp), ptr, chunk);
|
173
|
size -= chunk;
|
174
|
ptr += chunk;
|
175
|
data->cp += chunk;
|
176
|
}
|
177
|
}
|
178
|
|
179
|
/* Insert character into the buffer. */
|
180
|
void buffer_putc(struct buffer *b, u_char c)
|
181
|
{
|
182
|
buffer_put(b, &c, 1);
|
183
|
}
|
184
|
|
185
|
/* Append a NUL-terminated string (without its terminator). */
void buffer_putstr(struct buffer *b, const char *c)
{
	size_t len = strlen(c);

	buffer_put(b, c, len);
}
|
190
|
|
191
|
/* Keep flushing data to the fd until the buffer is empty or an error is
|
192
|
encountered or the operation would block. */
|
193
|
buffer_status_t buffer_flush_all(struct buffer *b, int fd)
|
194
|
{
|
195
|
buffer_status_t ret;
|
196
|
struct buffer_data *head;
|
197
|
size_t head_sp;
|
198
|
|
199
|
if (!b->head)
|
200
|
return BUFFER_EMPTY;
|
201
|
head_sp = (head = b->head)->sp;
|
202
|
/* Flush all data. */
|
203
|
while ((ret = buffer_flush_available(b, fd)) == BUFFER_PENDING) {
|
204
|
if ((b->head == head) && (head_sp == head->sp)
|
205
|
&& (errno != EINTR))
|
206
|
/* No data was flushed, so kernel buffer must be full. */
|
207
|
return ret;
|
208
|
head_sp = (head = b->head)->sp;
|
209
|
}
|
210
|
|
211
|
return ret;
|
212
|
}
|
213
|
|
214
|
#if 0
|
215
|
/* Flush enough data to fill a terminal window of the given scene (used only
|
216
|
by vty telnet interface). */
|
217
|
buffer_status_t
|
218
|
buffer_flush_window(struct buffer * b, int fd, int width, int height,
|
219
|
int erase_flag, int no_more_flag)
|
220
|
{
|
221
|
int nbytes;
|
222
|
int iov_alloc;
|
223
|
int iov_index;
|
224
|
struct iovec *iov;
|
225
|
struct iovec small_iov[3];
|
226
|
char more[] = " --More-- ";
|
227
|
char erase[] =
|
228
|
{ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
|
229
|
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
|
230
|
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08
|
231
|
};
|
232
|
struct buffer_data *data;
|
233
|
int column;
|
234
|
|
235
|
if (!b->head)
|
236
|
return BUFFER_EMPTY;
|
237
|
|
238
|
if (height < 1) {
|
239
|
zlog_warn
|
240
|
("%s called with non-positive window height %d, forcing to 1",
|
241
|
__func__, height);
|
242
|
height = 1;
|
243
|
} else if (height >= 2)
|
244
|
height--;
|
245
|
if (width < 1) {
|
246
|
zlog_warn
|
247
|
("%s called with non-positive window width %d, forcing to 1",
|
248
|
__func__, width);
|
249
|
width = 1;
|
250
|
}
|
251
|
|
252
|
/* For erase and more data add two to b's buffer_data count. */
|
253
|
if (b->head->next == NULL) {
|
254
|
iov_alloc = sizeof(small_iov) / sizeof(small_iov[0]);
|
255
|
iov = small_iov;
|
256
|
} else {
|
257
|
iov_alloc = ((height * (width + 2)) / b->size) + 10;
|
258
|
iov = XMALLOC(MTYPE_TMP, iov_alloc * sizeof(*iov));
|
259
|
}
|
260
|
iov_index = 0;
|
261
|
|
262
|
/* Previously print out is performed. */
|
263
|
if (erase_flag) {
|
264
|
iov[iov_index].iov_base = erase;
|
265
|
iov[iov_index].iov_len = sizeof erase;
|
266
|
iov_index++;
|
267
|
}
|
268
|
|
269
|
/* Output data. */
|
270
|
column = 1; /* Column position of next character displayed. */
|
271
|
for (data = b->head; data && (height > 0); data = data->next) {
|
272
|
size_t cp;
|
273
|
|
274
|
cp = data->sp;
|
275
|
while ((cp < data->cp) && (height > 0)) {
|
276
|
/* Calculate lines remaining and column position after displaying
|
277
|
this character. */
|
278
|
if (data->data[cp] == '\r')
|
279
|
column = 1;
|
280
|
else if ((data->data[cp] == '\n') || (column == width)) {
|
281
|
column = 1;
|
282
|
height--;
|
283
|
} else
|
284
|
column++;
|
285
|
cp++;
|
286
|
}
|
287
|
iov[iov_index].iov_base = (char *)(data->data + data->sp);
|
288
|
iov[iov_index++].iov_len = cp - data->sp;
|
289
|
data->sp = cp;
|
290
|
|
291
|
if (iov_index == iov_alloc)
|
292
|
/* This should not ordinarily happen. */
|
293
|
{
|
294
|
iov_alloc *= 2;
|
295
|
if (iov != small_iov) {
|
296
|
zlog_warn("%s: growing iov array to %d; "
|
297
|
"width %d, height %d, size %lu",
|
298
|
__func__, iov_alloc, width, height,
|
299
|
(u_long) b->size);
|
300
|
iov =
|
301
|
XREALLOC(MTYPE_TMP, iov,
|
302
|
iov_alloc * sizeof(*iov));
|
303
|
} else {
|
304
|
/* This should absolutely never occur. */
|
305
|
zlog_err
|
306
|
("%s: corruption detected: iov_small overflowed; "
|
307
|
"head %p, tail %p, head->next %p",
|
308
|
__func__, b->head, b->tail, b->head->next);
|
309
|
iov =
|
310
|
XMALLOC(MTYPE_TMP,
|
311
|
iov_alloc * sizeof(*iov));
|
312
|
memcpy(iov, small_iov, sizeof(small_iov));
|
313
|
}
|
314
|
}
|
315
|
}
|
316
|
|
317
|
/* In case of `more' display need. */
|
318
|
if (b->tail && (b->tail->sp < b->tail->cp) && !no_more_flag) {
|
319
|
iov[iov_index].iov_base = more;
|
320
|
iov[iov_index].iov_len = sizeof more;
|
321
|
iov_index++;
|
322
|
}
|
323
|
#ifdef IOV_MAX
|
324
|
/* IOV_MAX are normally defined in <sys/uio.h> , Posix.1g.
|
325
|
example: Solaris2.6 are defined IOV_MAX size at 16. */
|
326
|
{
|
327
|
struct iovec *c_iov = iov;
|
328
|
nbytes = 0; /* Make sure it's initialized. */
|
329
|
|
330
|
while (iov_index > 0) {
|
331
|
int iov_size;
|
332
|
|
333
|
iov_size =
|
334
|
((iov_index > IOV_MAX) ? IOV_MAX : iov_index);
|
335
|
if ((nbytes = writev(fd, c_iov, iov_size)) < 0) {
|
336
|
zlog_warn("%s: writev to fd %d failed: %s",
|
337
|
__func__, fd, safe_strerror(errno));
|
338
|
break;
|
339
|
}
|
340
|
|
341
|
/* move pointer io-vector */
|
342
|
c_iov += iov_size;
|
343
|
iov_index -= iov_size;
|
344
|
}
|
345
|
}
|
346
|
#else /* IOV_MAX */
|
347
|
if ((nbytes = writev(fd, iov, iov_index)) < 0)
|
348
|
zlog_warn("%s: writev to fd %d failed: %s",
|
349
|
__func__, fd, safe_strerror(errno));
|
350
|
#endif /* IOV_MAX */
|
351
|
|
352
|
/* Free printed buffer data. */
|
353
|
while (b->head && (b->head->sp == b->head->cp)) {
|
354
|
struct buffer_data *del;
|
355
|
if (!(b->head = (del = b->head)->next))
|
356
|
b->tail = NULL;
|
357
|
BUFFER_DATA_FREE(del);
|
358
|
}
|
359
|
|
360
|
if (iov != small_iov)
|
361
|
XFREE(MTYPE_TMP, iov);
|
362
|
|
363
|
return (nbytes < 0) ? BUFFER_ERROR :
|
364
|
(b->head ? BUFFER_PENDING : BUFFER_EMPTY);
|
365
|
}
|
366
|
#endif
|
367
|
|
368
|
/* This function (unlike other buffer_flush* functions above) is designed
|
369
|
to work with non-blocking sockets. It does not attempt to write out
|
370
|
all of the queued data, just a "big" chunk. It returns 0 if it was
|
371
|
able to empty out the buffers completely, 1 if more flushing is
|
372
|
required later, or -1 on a fatal write error. */
|
373
|
buffer_status_t buffer_flush_available(struct buffer * b, int fd)
|
374
|
{
|
375
|
|
376
|
/* These are just reasonable values to make sure a significant amount of
|
377
|
data is written. There's no need to go crazy and try to write it all
|
378
|
in one shot. */
|
379
|
#ifdef IOV_MAX
|
380
|
#define MAX_CHUNKS ((IOV_MAX >= 16) ? 16 : IOV_MAX)
|
381
|
#else
|
382
|
#define MAX_CHUNKS 16
|
383
|
#endif
|
384
|
#define MAX_FLUSH 131072
|
385
|
|
386
|
struct buffer_data *d;
|
387
|
size_t written;
|
388
|
struct iovec iov[MAX_CHUNKS];
|
389
|
size_t iovcnt = 0;
|
390
|
size_t nbyte = 0;
|
391
|
|
392
|
for (d = b->head; d && (iovcnt < MAX_CHUNKS) && (nbyte < MAX_FLUSH);
|
393
|
d = d->next, iovcnt++) {
|
394
|
iov[iovcnt].iov_base = d->data + d->sp;
|
395
|
nbyte += (iov[iovcnt].iov_len = d->cp - d->sp);
|
396
|
}
|
397
|
|
398
|
if (!nbyte)
|
399
|
/* No data to flush: should we issue a warning message? */
|
400
|
return BUFFER_EMPTY;
|
401
|
|
402
|
/* only place where written should be sign compared */
|
403
|
if ((ssize_t) (written = writev(fd, iov, iovcnt)) < 0) {
|
404
|
if (ERRNO_IO_RETRY(errno))
|
405
|
/* Calling code should try again later. */
|
406
|
return BUFFER_PENDING;
|
407
|
return BUFFER_ERROR;
|
408
|
}
|
409
|
|
410
|
/* Free printed buffer data. */
|
411
|
while (written > 0) {
|
412
|
struct buffer_data *d;
|
413
|
if (!(d = b->head))
|
414
|
break;
|
415
|
if (written < d->cp - d->sp) {
|
416
|
d->sp += written;
|
417
|
return BUFFER_PENDING;
|
418
|
}
|
419
|
|
420
|
written -= (d->cp - d->sp);
|
421
|
if (!(b->head = d->next))
|
422
|
b->tail = NULL;
|
423
|
BUFFER_DATA_FREE(d);
|
424
|
}
|
425
|
|
426
|
return b->head ? BUFFER_PENDING : BUFFER_EMPTY;
|
427
|
|
428
|
#undef MAX_CHUNKS
|
429
|
#undef MAX_FLUSH
|
430
|
}
|
431
|
|
432
|
buffer_status_t
|
433
|
buffer_write(struct buffer * b, int fd, const void *p, size_t size)
|
434
|
{
|
435
|
ssize_t nbytes;
|
436
|
|
437
|
#if 0
|
438
|
/* Should we attempt to drain any previously buffered data? This could help reduce latency in pushing out the data if we are stuck in a long-running thread that is preventing the main select loop from calling the flush thread... */
|
439
|
|
440
|
if (b->head && (buffer_flush_available(b, fd) == BUFFER_ERROR))
|
441
|
return BUFFER_ERROR;
|
442
|
#endif
|
443
|
if (b->head)
|
444
|
/* Buffer is not empty, so do not attempt to write the new data. */
|
445
|
nbytes = 0;
|
446
|
else if ((nbytes = write(fd, p, size)) < 0) {
|
447
|
if (ERRNO_IO_RETRY(errno))
|
448
|
nbytes = 0;
|
449
|
else
|
450
|
return BUFFER_ERROR;
|
451
|
}
|
452
|
/* Add any remaining data to the buffer. */
|
453
|
{
|
454
|
size_t written = nbytes;
|
455
|
if (written < size)
|
456
|
buffer_put(b, ((const char *)p) + written,
|
457
|
size - written);
|
458
|
}
|
459
|
return b->head ? BUFFER_PENDING : BUFFER_EMPTY;
|
460
|
}
|