#include "algo_skel.h"
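
/*
 * Shared context of the "naive" algorithm: a fixed pool of packet buffers
 * plus the bookkeeping needed to hand them from readers to writers.
 */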
struct naive_ctx {
  int ref_count;
  struct buffer_packet bps[10];
  GQueue* free_buffer;        // Available buffers
  GHashTable* used_buffer;    // Buffers currently used for reading or writing, keyed by fd
  GQueue* read_waiting;       // Fds waiting to be notified when a read buffer becomes free
  GHashTable* write_waiting;  // Per-fd queues of packets waiting to be written
};

void on_tcp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
void on_tcp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
void on_udp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
void on_udp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
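
/*
 * free_naive is shared by several categories, hence the reference count:
 * the context is only released once the last category lets go of it.
 */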
void free_nothing(void* app_ctx) {}

void free_naive(void* app_ctx) {
  struct naive_ctx* ctx = (struct naive_ctx*) app_ctx;
  ctx->ref_count--;
  if (ctx->ref_count > 0) return;
  g_queue_free(ctx->free_buffer);
  g_queue_free(ctx->read_waiting);
  g_hash_table_destroy (ctx->used_buffer);
  g_hash_table_destroy (ctx->write_waiting); // also release the per-fd write queues
  free(ctx);
}
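
/*
 * Accepts a new TCP connection and registers it twice with the event loop:
 * once under "tcp-read" and once (with a dup'ed fd) under "tcp-write".
 */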
void on_tcp_co(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  int conn_sock1, conn_sock2;
  struct sockaddr_in addr;
  socklen_t in_len;
  char url[1024], port[6];
  struct evt_core_cat local_cat = {0};
  struct evt_core_fdinfo to_fdinfo = {0};
  to_fdinfo.cat = &local_cat;
  to_fdinfo.url = url;

  in_len = sizeof(addr);
  conn_sock1 = accept(fdinfo->fd, (struct sockaddr*)&addr, &in_len);

  if (conn_sock1 == -1) goto co_error;
  conn_sock2 = dup(conn_sock1);
  if (conn_sock2 == -1) goto co_error;
  //printf("fd=%d accepts, creating fds=%d,%d\n", fd, conn_sock1, conn_sock2);

  url_get_port(port, fdinfo->url);

  to_fdinfo.fd = conn_sock1;
  to_fdinfo.cat->name = "tcp-read";
  sprintf(to_fdinfo.url, "tcp:read:127.0.0.1:%s", port);
  evt_core_add_fd (ctx, &to_fdinfo);

  to_fdinfo.fd = conn_sock2;
  to_fdinfo.cat->name = "tcp-write";
  sprintf(to_fdinfo.url, "tcp:write:127.0.0.1:%s", port);
  evt_core_add_fd (ctx, &to_fdinfo);

  return;

co_error:
  perror("Failed to handle new connection");
  exit(EXIT_FAILURE);
}

/**
 * Returns the buffer already associated with this fd, or takes a free one;
 * returns NULL (and subscribes the fd to read_waiting) if none is available.
 */
struct buffer_packet* get_read_buffer(struct naive_ctx *app_ctx, struct evt_core_fdinfo *fdinfo) {
  struct buffer_packet* bp;

  // 1. Check if we already have a buffer for this fd
  bp = g_hash_table_lookup (app_ctx->used_buffer, &fdinfo->fd);
  if (bp != NULL) return bp;

  // 2. Get a new buffer otherwise
  bp = g_queue_pop_head(app_ctx->free_buffer);
  if (bp == NULL) {
    // 2.1 If no buffer is available, we subscribe to be notified later.
    // The fd is stored by value so notify_read can pop it with GPOINTER_TO_INT.
    g_queue_push_tail (app_ctx->read_waiting, GINT_TO_POINTER(fdinfo->fd));
    return NULL;
  }

  // 3. Update state
  g_hash_table_insert(app_ctx->used_buffer, &(fdinfo->fd), bp);

  return bp;
}

/**
 * Returns the buffer already associated with this fd, or the next packet
 * queued for it in write_waiting; returns NULL if there is nothing to write.
 */
struct buffer_packet* get_write_buffer(struct naive_ctx *app_ctx, struct evt_core_fdinfo *fdinfo) {
  struct buffer_packet* bp;
  GQueue* q;

  // 1. Check if we already have a buffer for this fd
  bp = g_hash_table_lookup (app_ctx->used_buffer, &fdinfo->fd);
  if (bp != NULL) return bp;

  // 2. Check our waiting queue otherwise
  if ((q = g_hash_table_lookup(app_ctx->write_waiting, &(fdinfo->fd))) == NULL) return NULL;
  bp = g_queue_pop_head(q);
  if (bp == NULL) return NULL; // No packet to process

  // 3. Update state
  g_hash_table_insert(app_ctx->used_buffer, &(fdinfo->fd), bp);

  return bp;
}
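
/*
 * Moves a buffer from the "read" side (used_buffer of `from`) to the
 * write_waiting queue of `to`, creating that queue on first use.
 */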
void mv_buffer_rtow(struct naive_ctx* app_ctx,
                    struct evt_core_fdinfo* from,
                    struct evt_core_fdinfo* to,
                    struct buffer_packet* bp) {
  // 1. We get the target writing queue
  GQueue* q;
  q = g_hash_table_lookup(app_ctx->write_waiting, &(to->fd));
  if (q == NULL) {
    q = g_queue_new ();
    g_hash_table_insert(app_ctx->write_waiting, &(to->fd), q);
  }

  // 2. We move the buffer to the target queue
  g_hash_table_remove(app_ctx->used_buffer, &from->fd);
  g_queue_push_tail(q, bp);
}
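
/*
 * Returns a buffer to the free pool once its packet has been fully written,
 * and detaches it from the writer's fd.
 */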
void mv_buffer_wtor(struct naive_ctx* app_ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  g_queue_push_tail (app_ctx->free_buffer, bp);
  g_hash_table_remove(app_ctx->used_buffer, &(fdinfo->fd));
}
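
/*
 * Wakes up the first fd that was waiting for a free buffer and replays its
 * read callback, now that a buffer has been returned to the pool.
 */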
void notify_read(struct evt_core_ctx* ctx, struct naive_ctx* app_ctx) {
  struct evt_core_fdinfo* next_fdinfo = NULL;
  while (next_fdinfo == NULL) {
    int fd = GPOINTER_TO_INT(g_queue_pop_head(app_ctx->read_waiting));
    if (fd == 0) break;
    next_fdinfo = evt_core_get_from_fd (ctx, fd);
    if (strcmp(next_fdinfo->cat->name, "tcp-read") == 0) on_tcp_read(ctx, next_fdinfo);
    else if (strcmp(next_fdinfo->cat->name, "udp-read") == 0) on_udp_read(ctx, next_fdinfo);
    else {
      fprintf(stderr, "A fd from category %s can't be stored in read_waiting\n", next_fdinfo->cat->name);
      exit(EXIT_FAILURE);
    }
  }
}
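
/*
 * TCP read handler: accumulates a full packet from the TCP stream into a
 * buffer, then hands it to the matching "udp:write" fd.
 */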
void on_tcp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct evt_core_fdinfo *to_fdinfo = NULL;
  struct naive_ctx* app_ctx = fdinfo->cat->app_ctx;
  int read_res = FDS_READY;
  char url[255];

  // 1. Get current read buffer OR a new read buffer OR subscribe to be notified later
  if ((bp = get_read_buffer(app_ctx, fdinfo)) == NULL) return;

  // 2. Try to read a whole packet in the buffer
  while (read_res != FDS_AGAIN && bp->mode == BP_READING) {
    read_res = read_packet_from_tcp (fdinfo->fd, bp);
    if (read_res == FDS_ERR) goto co_error;
  }
  if (bp->mode != BP_WRITING) return;

  // 3. A whole packet has been read, we will find someone to write it
  sprintf(url, "udp:write:127.0.0.1:%d", bp->ip.ap.str.port);
  to_fdinfo = evt_core_get_from_url (ctx, url);
  if (to_fdinfo == NULL) goto co_error;

  // 4. We move the buffer and notify the target
  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
  on_udp_write(ctx, to_fdinfo);

  return;
co_error:
  perror("Failed to TCP read");
  exit(EXIT_FAILURE);
}
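
/*
 * TCP write handler: flushes the pending packet for this fd, then recycles
 * the buffer and wakes up any reader waiting for one.
 */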
void on_tcp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct naive_ctx* app_ctx = fdinfo->cat->app_ctx;
  int write_res = FDS_READY;

  // 1. Get current write buffer OR a buffer from the waiting queue OR leave
  if ((bp = get_write_buffer(app_ctx, fdinfo)) == NULL) return;

  // 2. Write data from the buffer to the socket
  while (write_res != FDS_AGAIN && bp->mode == BP_WRITING) {
    write_res = write_packet_to_tcp(fdinfo->fd, bp);
    if (write_res == FDS_ERR) goto co_error;
  }
  if (bp->mode != BP_READING) return;

  // 3. A whole packet has been written
  // Release the buffer and notify
  mv_buffer_wtor(app_ctx, fdinfo, bp);
  notify_read(ctx, app_ctx);

  return;
co_error:
  perror("Failed to TCP write");
  exit(EXIT_FAILURE);
}
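
/*
 * UDP read handler: reads one datagram, records the port of the receiving
 * UDP socket in the packet header, then queues it on the (hardcoded)
 * tcp:write:127.0.0.1:7500 connection.
 */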
void on_udp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct evt_core_fdinfo *to_fdinfo;
  struct naive_ctx* app_ctx = fdinfo->cat->app_ctx;
  int read_res = FDS_READY;
  char url[255];

  // 1. Get current read buffer OR a new read buffer OR subscribe to be notified later
  if ((bp = get_read_buffer(app_ctx, fdinfo)) == NULL) return;

  // 2. Read packet from socket
  bp->ip.ap.str.port = url_get_port_int (fdinfo->url);
  read_res = read_packet_from_udp (fdinfo->fd, bp, fdinfo->other);
  if (read_res == FDS_ERR) goto co_error;
  if (bp->mode != BP_WRITING) return;

  // 3. A whole packet has been read, we will find someone to write it
  sprintf(url, "tcp:write:127.0.0.1:7500");
  to_fdinfo = evt_core_get_from_url (ctx, url);
  if (to_fdinfo == NULL) goto co_error;

  // 4. We move the buffer and notify the target
  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
  on_tcp_write(ctx, to_fdinfo);

  return;

co_error:
  perror("Failed to UDP read");
  exit(EXIT_FAILURE);
}
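
/*
 * UDP write handler: sends the pending packet as a single datagram, then
 * recycles the buffer and wakes up any reader waiting for one.
 */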
void on_udp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct naive_ctx* app_ctx = fdinfo->cat->app_ctx;
  int write_res = FDS_READY;

  // 1. Get current write buffer OR a buffer from the waiting queue OR leave
  if ((bp = get_write_buffer(app_ctx, fdinfo)) == NULL) return;

  // 2. Write buffer
  write_res = write_packet_to_udp(fdinfo->fd, bp, fdinfo->other);
  if (write_res == FDS_ERR) goto co_error;
  if (bp->mode != BP_READING) return;

  // 3. A whole packet has been written
  // Release the buffer and notify
  mv_buffer_wtor(app_ctx, fdinfo, bp);
  notify_read(ctx, app_ctx);

  return;
co_error:
  perror("Failed to UDP write");
  exit(EXIT_FAILURE);
}
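
/*
 * Value-destroy callback for the write_waiting hash table.
 */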
void naive_free_simple(void* v) {
  free(v);
}
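
/*
 * Error handler shared by all categories: reclaims any buffer tied to the
 * failing fd and removes the fd from every waiting structure.
 */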
void on_err(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct naive_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct buffer_packet* bp;

  // 1. If it has a "used" buffer, remove it
  bp = g_hash_table_lookup (app_ctx->used_buffer, &(fdinfo->fd));
  if (bp != NULL) {
    g_hash_table_remove (app_ctx->used_buffer, &(fdinfo->fd));
    memset(bp, 0, sizeof(struct buffer_packet));
    g_queue_push_tail(app_ctx->free_buffer, bp);
  }

  // 2. If it appears in the write waiting queue, remove it
  GQueue* writew = g_hash_table_lookup (app_ctx->write_waiting, &(fdinfo->fd));
  while (writew != NULL && (bp = g_queue_pop_head (writew)) != NULL) {
    memset(bp, 0, sizeof(struct buffer_packet));
    g_queue_push_tail(app_ctx->free_buffer, bp);
  }
  g_hash_table_remove (app_ctx->write_waiting, &(fdinfo->fd));

  // 3. If it appears in the read waiting queue, remove it
  g_queue_remove_all (app_ctx->read_waiting, GINT_TO_POINTER(fdinfo->fd)); // fds are stored by value in read_waiting
}
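
/*
 * Entry point: allocates the shared context, fills the free-buffer pool and
 * configures one category per event type on the algo skeleton.
 */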
void algo_naive(struct algo_skel* as) {
  struct naive_ctx* ctx = malloc(sizeof(struct naive_ctx));
  if (ctx == NULL) goto init_err;
  memset(ctx, 0, sizeof(struct naive_ctx));
  ctx->free_buffer = g_queue_new ();
  ctx->read_waiting = g_queue_new ();
  ctx->used_buffer = g_hash_table_new(g_int_hash, g_int_equal);
  ctx->write_waiting = g_hash_table_new_full (g_int_hash, g_int_equal, NULL, naive_free_simple);
  for (int i = 0; i < sizeof(ctx->bps) / sizeof(ctx->bps[0]); i++) {
    g_queue_push_tail(ctx->free_buffer, &(ctx->bps[i]));
  }

  as->on_tcp_co.name = "tcp-listen";
  as->on_tcp_co.flags = EPOLLIN;
  as->on_tcp_co.free_app_ctx = free_nothing;
  as->on_tcp_co.cb = on_tcp_co;

  as->on_tcp_read.name = "tcp-read";
  as->on_tcp_read.flags = EPOLLIN | EPOLLET | EPOLLRDHUP;
  as->on_tcp_read.app_ctx = ctx;
  as->on_tcp_read.free_app_ctx = free_naive;
  as->on_tcp_read.cb = on_tcp_read;
  as->on_tcp_read.err_cb = on_err;
  ctx->ref_count++;

  as->on_udp_read.name = "udp-read";
  as->on_udp_read.flags = EPOLLIN | EPOLLET;
  as->on_udp_read.app_ctx = ctx;
  as->on_udp_read.free_app_ctx = free_naive;
  as->on_udp_read.cb = on_udp_read;
  as->on_udp_read.err_cb = on_err;
  ctx->ref_count++;

  as->on_tcp_write.name = "tcp-write";
  as->on_tcp_write.flags = EPOLLOUT | EPOLLET | EPOLLRDHUP;
  as->on_tcp_write.app_ctx = ctx;
  as->on_tcp_write.free_app_ctx = free_naive;
  as->on_tcp_write.cb = on_tcp_write;
  as->on_tcp_write.err_cb = on_err;
  ctx->ref_count++;

  as->on_udp_write.name = "udp-write";
  as->on_udp_write.flags = EPOLLOUT | EPOLLET;
  as->on_udp_write.app_ctx = ctx;
  as->on_udp_write.free_app_ctx = free_naive;
  as->on_udp_write.cb = on_udp_write;
  as->on_udp_write.err_cb = on_err;
  ctx->ref_count++;

  return;
init_err:
  fprintf(stderr, "Failed to init algo naive\n");
  exit(EXIT_FAILURE);
}
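
/*
 * Usage sketch (hypothetical, not part of this file): a main program is
 * expected to let algo_naive fill an algo_skel and then register those
 * categories with its evt_core event loop, e.g.
 *
 *   struct algo_skel as = {0};
 *   algo_naive(&as);
 *   // register as.on_tcp_co, as.on_tcp_read, as.on_udp_read, ... with evt_core
 */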