author     Richard Braun <rbraun@sceen.net>    2017-06-17 18:45:31 +0200
committer  Richard Braun <rbraun@sceen.net>    2017-06-17 18:48:58 +0200
commit     0a5af0a62ba2ed1c17404b1dd8ed15b0ff41abf7 (patch)
tree       e6e97bfdb129cd9a4a8e4db90b10460d71a17b1e /kern/cbuf.c
parent     bf32d26490a6fd688194f67856563f0a5a843668 (diff)
kern/cbuf: implement buffered reads and writes
This change adds an interface for fast, buffered access to the contents of a circular buffer, as well as an interface to write into a circular buffer at custom locations, at the cost of a small interface break in cbuf_read.
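For reference, here is a minimal usage sketch of the new interface. It is not part of the patch: it assumes an initializer along the lines of cbuf_init(&cbuf, storage, capacity) declared in kern/cbuf.h (not shown in this diff), and a power-of-two capacity as required by cbuf_index(). The start and end fields are read directly for brevity; the header may provide accessors for them.

    #include <assert.h>
    #include <stddef.h>

    #include <kern/cbuf.h>

    static char storage[16];    /* capacity must be a power of two */

    static void
    cbuf_example(void)
    {
        struct cbuf cbuf;
        char out[8];
        size_t size;
        int error;

        cbuf_init(&cbuf, storage, sizeof(storage));    /* assumed initializer */

        /* Buffered write at the current end of the buffer. */
        error = cbuf_write(&cbuf, cbuf.end, "hello", 5);
        assert(!error);

        /* Buffered read from the oldest valid index; the size is in/out. */
        size = sizeof(out);
        error = cbuf_read(&cbuf, cbuf.start, out, &size);
        assert(!error);
        /* size now holds the number of bytes actually copied (5 here). */
    }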
Diffstat (limited to 'kern/cbuf.c')
-rw-r--r--  kern/cbuf.c | 87
1 file changed, 78 insertions(+), 9 deletions(-)
diff --git a/kern/cbuf.c b/kern/cbuf.c
index ef5c7e33..83032e50 100644
--- a/kern/cbuf.c
+++ b/kern/cbuf.c
@@ -17,6 +17,7 @@
#include <assert.h>
#include <stddef.h>
+#include <string.h>
#include <kern/cbuf.h>
#include <kern/error.h>
@@ -42,18 +43,23 @@ cbuf_index(const struct cbuf *cbuf, size_t abs_index)
return abs_index & (cbuf->capacity - 1);
}
-void
-cbuf_push(struct cbuf *cbuf, char byte)
+static void
+cbuf_update_start(struct cbuf *cbuf)
{
- cbuf->buf[cbuf_index(cbuf, cbuf->end)] = byte;
- cbuf->end++;
-
/* Mind integer overflows */
if (cbuf_size(cbuf) > cbuf->capacity) {
cbuf->start = cbuf->end - cbuf->capacity;
}
}
+void
+cbuf_push(struct cbuf *cbuf, char byte)
+{
+ cbuf->buf[cbuf_index(cbuf, cbuf->end)] = byte;
+ cbuf->end++;
+ cbuf_update_start(cbuf);
+}
+
int
cbuf_pop(struct cbuf *cbuf, char *bytep)
{
@@ -67,13 +73,76 @@ cbuf_pop(struct cbuf *cbuf, char *bytep)
}
int
-cbuf_read(const struct cbuf *cbuf, size_t index, char *bytep)
+cbuf_write(struct cbuf *cbuf, size_t index, const char *buf, size_t size)
{
- /* Mind integer overflows */
- if ((cbuf->end - index - 1) >= cbuf_size(cbuf)) {
+ char *start, *end, *buf_end;
+ size_t new_end, skip;
+
+ if ((cbuf->end - index) > cbuf_size(cbuf)) {
+ return ERROR_INVAL;
+ }
+
+ new_end = index + size;
+
+ if ((new_end - cbuf->start) > cbuf_size(cbuf)) {
+ cbuf->end = new_end;
+
+ if (size > cbuf_capacity(cbuf)) {
+ skip = size - cbuf_capacity(cbuf);
+ buf += skip;
+ index += skip;
+ size = cbuf_capacity(cbuf);
+ }
+ }
+
+ start = &cbuf->buf[cbuf_index(cbuf, index)];
+ end = start + size;
+ buf_end = cbuf->buf + cbuf->capacity;
+
+ if (end > buf_end) {
+ skip = buf_end - start;
+ memcpy(start, buf, skip);
+ buf += skip;
+ start = cbuf->buf;
+ size -= skip;
+ }
+
+ memcpy(start, buf, size);
+ cbuf_update_start(cbuf);
+ return 0;
+}
+
+int
+cbuf_read(const struct cbuf *cbuf, size_t index, char *buf, size_t *sizep)
+{
+ const char *start, *end, *buf_end;
+ size_t size;
+
+ size = cbuf->end - index;
+
+ /* At least one byte must be available */
+ if ((size - 1) >= cbuf_size(cbuf)) {
return ERROR_INVAL;
}
- *bytep = cbuf->buf[cbuf_index(cbuf, index)];
+ if (*sizep > size) {
+ *sizep = size;
+ }
+
+ start = &cbuf->buf[cbuf_index(cbuf, index)];
+ end = start + *sizep;
+ buf_end = cbuf->buf + cbuf->capacity;
+
+ if (end <= buf_end) {
+ size = *sizep;
+ } else {
+ size = buf_end - start;
+ memcpy(buf, start, size);
+ buf += size;
+ start = cbuf->buf;
+ size = *sizep - size;
+ }
+
+ memcpy(buf, start, size);
return 0;
}
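Both cbuf_write() and cbuf_read() handle a region that crosses the physical end of the backing array by splitting it into at most two memcpy() calls. The standalone sketch below reproduces that pattern outside of struct cbuf to make the wrap-around case easier to follow; the function name and parameters are illustrative only, not part of the patch.

    #include <stddef.h>
    #include <string.h>

    /*
     * Copy size bytes from src into the circular array ring of capacity
     * bytes (a power of two), starting at absolute index "index". This
     * mirrors the two-step copy performed by cbuf_write().
     */
    static void
    ring_copy_in(char *ring, size_t capacity, size_t index,
                 const char *src, size_t size)
    {
        char *start, *end, *ring_end;
        size_t skip;

        start = &ring[index & (capacity - 1)];
        end = start + size;
        ring_end = ring + capacity;

        if (end > ring_end) {
            /* The region wraps: fill the tail of the array first. */
            skip = ring_end - start;
            memcpy(start, src, skip);
            src += skip;
            start = ring;
            size -= skip;
        }

        memcpy(start, src, size);
    }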