Merge branch 'feature/rr'
commit ce623a50cf
17 changed files with 665 additions and 77 deletions
scripts/xp1 (20 changed lines)
@@ -2,8 +2,14 @@
 # Create workdir
 XP_FOLDER=`mktemp -d -p . xp1-XXXXXXXXXXXXXXXX`
 mkdir -p $XP_FOLDER
 XP_ID=`echo $XP_FOLDER|cut -c 3-`
 cd $XP_FOLDER
+COUNT=$1
+INTERVAL=$2
+SIZE=$3
+ALLOWED_BOOTSTRAP=190
+ALLOWED_TIME=`expr $(expr ${COUNT} * ${INTERVAL}) / 1000 + ${ALLOWED_BOOTSTRAP}`
+echo "Will run for ${ALLOWED_TIME} (count=$1, interval=$2)"
 
 # Create folders
 docker run \
@@ -17,23 +23,27 @@ docker run \
 docker run \
   --privileged \
   --rm \
   --name "$XP_ID-server" \
   -v `pwd`/shared:/home/donar/shared \
   -v `pwd`/log:/home/donar/log \
   registry.gitlab.inria.fr/qdufour/donar \
   xp1-server &
 
-sleep 30
+sleep 10
 
 # Run client
 docker run \
   --privileged \
   --rm \
   --name "$XP_ID-client" \
   -v `pwd`/res:/home/donar/res \
   -v `pwd`/shared:/home/donar/shared \
   -v `pwd`/log:/home/donar/log \
   registry.gitlab.inria.fr/qdufour/donar \
-  xp1-client $1 $2 $3
+  xp1-client $COUNT $INTERVAL $SIZE &
 
-# Kill server
-kill %1
+sleep $ALLOWED_TIME
+
+# Kill
+docker kill "$XP_ID-client" || true
+docker kill "$XP_ID-server" || true
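The run length the script now waits for is the send duration (count packets, one every interval milliseconds, converted to whole seconds) plus a fixed 190 s allowance for Tor bootstrap. A minimal C sketch of the same arithmetic, for reference only (not part of the patch; function name illustrative):

#include <stdint.h>

/* Mirrors ALLOWED_TIME in scripts/xp1: count * interval is in milliseconds,
   the result is in seconds. */
static int64_t allowed_time_sec(int64_t count, int64_t interval_ms) {
  const int64_t allowed_bootstrap_sec = 190;  /* ALLOWED_BOOTSTRAP in the script */
  return (count * interval_ms) / 1000 + allowed_bootstrap_sec;
}

/* Example: allowed_time_sec(6000, 100) == 790 seconds. */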
src/algo_naive.c
@@ -65,13 +65,13 @@ int on_tcp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
   to_fdinfo = evt_core_get_from_url (ctx, url);
   if (to_fdinfo == NULL) {
     fprintf(stderr, "No fd for URL %s in tcp-read. Dropping packet :( \n", url);
-    mv_buffer_wtor (app_ctx, fdinfo, bp);
+    mv_buffer_wtof (app_ctx, fdinfo);
     return 1;
   }
   //printf("Pass packet from %s to %s\n", fdinfo->url, url);
 
   // 4. We move the buffer and notify the target
-  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
+  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo);
   on_udp_write(ctx, to_fdinfo);
 
   return 0;
@@ -97,7 +97,7 @@ int on_tcp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
 
   // 3. A whole packet has been written
   // Release the buffer and notify
-  mv_buffer_wtor(app_ctx, fdinfo, bp);
+  mv_buffer_wtof(app_ctx, fdinfo);
   notify_read(ctx, app_ctx);
 
   return 0;
@@ -127,13 +127,13 @@ int on_udp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
   to_fdinfo = evt_core_get_from_url (ctx, url);
   if (to_fdinfo == NULL) {
     fprintf(stderr, "No fd for URL %s in udp-read. Dropping packet :( \n", url);
-    mv_buffer_wtor (app_ctx, fdinfo, bp);
+    mv_buffer_wtof (app_ctx, fdinfo);
     return 1;
   }
   //printf("Pass packet from %s to %s\n", fdinfo->url, url);
 
   // 4. We move the buffer and notify the target
-  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
+  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo);
   on_tcp_write(ctx, to_fdinfo);
 
   return 0;
@@ -158,7 +158,7 @@ int on_udp_write (struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
 
   // 3. A whole packet has been written
   // Release the buffer and notify
-  mv_buffer_wtor(app_ctx, fdinfo, bp);
+  mv_buffer_wtof(app_ctx, fdinfo);
   notify_read(ctx, app_ctx);
 
   return 0;
@@ -193,12 +193,13 @@ int on_err(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
   return 0;
 }
 
-void algo_naive(struct algo_skel* as) {
+void algo_naive(struct evt_core_ctx* evt, struct algo_skel* as) {
   struct algo_ctx* ctx = malloc(sizeof(struct algo_ctx));
   if (ctx == NULL) goto init_err;
   memset(ctx, 0, sizeof(struct algo_ctx));
   ctx->free_buffer = g_queue_new ();
   ctx->read_waiting = g_queue_new ();
+  ctx->application_waiting = g_hash_table_new (NULL, NULL);
   ctx->used_buffer = g_hash_table_new(g_int_hash, g_int_equal);
   ctx->write_waiting = g_hash_table_new_full (g_int_hash, g_int_equal, NULL, naive_free_simple);
   for (int i = 0; i < sizeof(ctx->bps) / sizeof(ctx->bps[0]); i++) {
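Handlers now hand a finished buffer back with mv_buffer_wtof(app_ctx, fdinfo) instead of mv_buffer_wtor(app_ctx, fdinfo, bp): the helper finds the buffer itself through the used_buffer table keyed by the file descriptor (see src/algo_utils.c below). A toy, self-contained GLib sketch of that fd-to-buffer ownership table (illustrative only, not code from the patch):

#include <glib.h>
#include <stdio.h>

int main(void) {
  /* Model of app_ctx->used_buffer: the buffer is found from the fd alone,
     which is why the new release helper no longer takes bp. */
  GHashTable* used_buffer = g_hash_table_new(g_int_hash, g_int_equal);
  static int fd = 7;
  static char packet[16] = "payload";

  g_hash_table_insert(used_buffer, &fd, packet);
  char* bp = g_hash_table_lookup(used_buffer, &fd);
  printf("fd=%d owns buffer '%s'\n", fd, bp);
  g_hash_table_remove(used_buffer, &fd);   /* release: the real code pushes bp back onto free_buffer */
  g_hash_table_unref(used_buffer);
  return 0;
}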
src/algo_rr.c (464 changed lines)
@@ -1,56 +1,496 @@
#include <sys/timerfd.h>
#include "algo_skel.h"
#include "algo_utils.h"
#include "utils.h"

void algo_rr(struct algo_skel* as) {
struct waited_pkt {
  uint16_t id;
  int link_num;
  uint8_t on;
  int timer_fd;
};

struct deferred_pkt {
  int link_fd;
  int idx;
  uint8_t on;
};

struct rr_ctx {
  uint8_t my_links;
  uint16_t my_links_ver;
  uint8_t remote_links;
  int64_t mjit;
  uint16_t recv_id;
  uint16_t sent_id;
  uint8_t current_link;
  struct timespec emit_time;
  struct deferred_pkt real[PACKET_BUFFER_SIZE];
  struct waited_pkt wait[PACKET_BUFFER_SIZE];
};
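The two bookkeeping arrays in rr_ctx are indexed by packet id modulo PACKET_BUFFER_SIZE: real[] holds packets that have arrived but must wait for their predecessors, wait[] holds the timers armed for ids that are still missing, and recv_id/sent_id are 16-bit counters that wrap. A small sketch of the slot arithmetic under those assumptions (illustrative, not code from the patch):

#include <stdint.h>
#include <stdio.h>

#define PACKET_BUFFER_SIZE 20   /* value defined in algo_utils.h in this commit */

int main(void) {
  uint16_t id = 65535;                    /* ids wrap at 16 bits */
  uint16_t next = (uint16_t)(id + 1);     /* 0 after the wrap */
  printf("id %u -> slot %u\n", (unsigned)id,   (unsigned)(id % PACKET_BUFFER_SIZE));
  printf("id %u -> slot %u\n", (unsigned)next, (unsigned)(next % PACKET_BUFFER_SIZE));
  /* Slots only stay distinct while fewer than PACKET_BUFFER_SIZE ids are in flight. */
  return 0;
}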
int rr_on_tcp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
int rr_on_tcp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
int rr_on_udp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);
int rr_on_udp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo);

int rr_on_tcp_co(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  int conn_sock1, conn_sock2;
  struct sockaddr_in addr;
  socklen_t in_len;
  char url[1024], port[6];
  struct evt_core_cat local_cat = {0};
  struct evt_core_fdinfo to_fdinfo = {0};
  to_fdinfo.cat = &local_cat;
  to_fdinfo.url = url;

  in_len = sizeof(addr);
  conn_sock1 = accept(fdinfo->fd, (struct sockaddr*)&addr, &in_len);

  if (conn_sock1 == -1) goto co_error;
  conn_sock2 = dup(conn_sock1);
  if (conn_sock2 == -1) goto co_error;
  //printf("fd=%d accepts, creating fds=%d,%d\n", fd, conn_sock1, conn_sock2);

  url_get_port(port, fdinfo->url);

  to_fdinfo.fd = conn_sock1;
  to_fdinfo.cat->name = "tcp-read";
  sprintf(to_fdinfo.url, "tcp:read:127.0.0.1:%s", port);
  evt_core_add_fd (ctx, &to_fdinfo);

  to_fdinfo.fd = conn_sock2;
  to_fdinfo.cat->name = "tcp-write";
  sprintf(to_fdinfo.url, "tcp:write:127.0.0.1:%s", port);
  evt_core_add_fd (ctx, &to_fdinfo);

  return 1;

co_error:
  perror("Failed to handle new connection");
  exit(EXIT_FAILURE);
}

int set_timeout(struct evt_core_ctx* evts, uint64_t milli_sec, struct waited_pkt* wpkt) {
  struct timespec now;
  struct itimerspec timer_config;
  char url[1024];
  struct evt_core_cat cat = {0};
  struct evt_core_fdinfo fdinfo = {0};
  fdinfo.cat = &cat;
  fdinfo.url = url;

  //printf("Will add a timeout of %ld ms\n", milli_sec);
  if (clock_gettime(CLOCK_REALTIME, &now) == -1) {
    perror("clock_gettime");
    exit(EXIT_FAILURE);
  }

  uint64_t ns = now.tv_nsec + (milli_sec % 1000) * 1000000;
  timer_config.it_value.tv_sec = now.tv_sec + milli_sec / 1000 + ns / 1000000000;
  timer_config.it_value.tv_nsec = ns % 1000000000;
  timer_config.it_interval.tv_sec = 60;
  timer_config.it_interval.tv_nsec = 0;

  fdinfo.fd = timerfd_create(CLOCK_REALTIME, 0);
  if (fdinfo.fd == -1) {
    perror("Unable to timerfd_create");
    exit(EXIT_FAILURE);
  }
  if (timerfd_settime (fdinfo.fd, TFD_TIMER_ABSTIME, &timer_config, NULL) == -1) {
    perror("Unable to timerfd_settime");
    exit(EXIT_FAILURE);
  }
  fdinfo.cat->name = "timeout";
  fdinfo.other = wpkt; // Should put the link number and the id
  fdinfo.free_other = NULL;
  sprintf(fdinfo.url, "timer:%ld:1", milli_sec);
  evt_core_add_fd (evts, &fdinfo);

  return fdinfo.fd;
}
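set_timeout() arms the timer at an absolute CLOCK_REALTIME deadline of now + milli_sec, carrying any nanosecond overflow into the seconds field; the 60 s it_interval only matters if the fd is never removed, and rr_on_timer removes it on the first expiry. A self-contained sketch of the deadline arithmetic (same computation as above, helper name assumed):

#include <stdint.h>
#include <time.h>

/* Absolute deadline "now + milli_sec", suitable for TFD_TIMER_ABSTIME. */
static struct timespec deadline_after_ms(struct timespec now, uint64_t milli_sec) {
  struct timespec t;
  uint64_t ns = now.tv_nsec + (milli_sec % 1000) * 1000000;
  t.tv_sec  = now.tv_sec + milli_sec / 1000 + ns / 1000000000;
  t.tv_nsec = ns % 1000000000;
  return t;
}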
void rr_pkt_register(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;

  //printf("Selected url %s for pkt %d to be queued for delivery\n", fdinfo->url, bp->ip.ap.str.id);

  // 1. Update links I can use thanks to target feedback
  if (bp->ip.ap.str.id > rr->my_links_ver) {
    rr->my_links = bp->ip.ap.str.bitfield;
    rr->my_links_ver = bp->ip.ap.str.id;
  }

  // 2. If packet arrived too late, we discard it
  if (ring_gt(rr->recv_id, bp->ip.ap.str.id - 1)) {
    // Packet has already been delivered or dropped, we free the buffer
    fprintf(stderr, "Packet %d arrived too late (current: %d)\n", bp->ip.ap.str.id, rr->recv_id);
    mv_buffer_wtof (app_ctx, fdinfo);
    return;
  }

  // 3. If packet arrived too early, we register a timer
  //printf("%d < %d = %d\n", rr->recv_id, bp->ip.ap.str.id - 1, ring_lt(rr->recv_id, bp->ip.ap.str.id - 1));
  if (ring_lt(rr->recv_id, bp->ip.ap.str.id - 1)) {
    int64_t timeout = rr->mjit - (int64_t) bp->ip.ap.str.deltat;
    //printf("%ld - %ld = %ld\n", rr->mjit, (int64_t) bp->ip.ap.str.deltat, timeout);
    if (timeout <= 0) timeout = 0;
    int idx_waited = (bp->ip.ap.str.id - 1) % PACKET_BUFFER_SIZE;
    rr->wait[idx_waited].on = 1;
    rr->wait[idx_waited].id = bp->ip.ap.str.id - 1;
    rr->wait[idx_waited].link_num = bp->ip.ap.str.prevlink;
    rr->wait[idx_waited].timer_fd = set_timeout(ctx, timeout, &rr->wait[idx_waited]);
  }

  // 4. We queue the packet
  int idx_real = bp->ip.ap.str.id % PACKET_BUFFER_SIZE;
  rr->real[idx_real].on = 1;
  rr->real[idx_real].idx = idx_real;
  rr->real[idx_real].link_fd = fdinfo->fd;
  mv_buffer_rtoa(app_ctx, fdinfo, &rr->real[idx_real].idx);

  // 5. We make sure that the remote link is set to up
  char buffer[16];
  url_get_port (buffer, fdinfo->url);
  int link_num = atoi(buffer) - 7500; // @FIXME Hardcoded
  rr->remote_links |= 1 << link_num; // Make sure that the link is marked as working
}
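rr_pkt_register() therefore sorts every packet into one of three cases using the serial-number comparisons from src/utils.c: already superseded (free the buffer), exactly the next id (deliverable as soon as rr_pkt_unroll runs), or further ahead (queue it and arm a timer of at most mjit - deltat ms for the missing predecessor). A condensed, illustrative decision helper under those assumptions (names hypothetical; ring_gt/ring_lt from the patch):

#include <stdint.h>

int ring_gt(uint16_t v1, uint16_t v2);   /* src/utils.c */
int ring_lt(uint16_t v1, uint16_t v2);

enum rr_case { RR_TOO_LATE, RR_IN_ORDER, RR_TOO_EARLY };

static enum rr_case classify(uint16_t recv_id, uint16_t id, uint16_t deltat,
                             int64_t mjit, int64_t* timeout_ms) {
  uint16_t prev = (uint16_t)(id - 1);
  if (ring_gt(recv_id, prev)) return RR_TOO_LATE;   /* drop: already delivered past it */
  if (ring_lt(recv_id, prev)) {                     /* a predecessor is still missing  */
    *timeout_ms = mjit - (int64_t)deltat;
    if (*timeout_ms <= 0) *timeout_ms = 0;
    return RR_TOO_EARLY;                            /* queue + set_timeout() on prev   */
  }
  return RR_IN_ORDER;                               /* recv_id == id - 1: queue, unroll */
}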
void rr_deliver(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo, struct deferred_pkt* dp) {
  struct evt_core_fdinfo *to_fdinfo = NULL;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  char url[255];

  // 1. Marked the packet as handled
  dp->on = 0;

  // 2. Get the buffer
  struct buffer_packet* bp = get_app_buffer (app_ctx, &dp->idx);
  //printf("Selected url %s for pkt %d to be delivered\n", fdinfo->url, bp->ip.ap.str.id);

  // 3. We update our cursor
  rr->recv_id = bp->ip.ap.str.id;

  // 4. We check that we don't have a running timeout
  int idx_real = bp->ip.ap.str.id % PACKET_BUFFER_SIZE;
  if (rr->wait[idx_real].on) {
    rr->wait[idx_real].on = 0;
    evt_core_rm_fd (ctx, rr->wait[idx_real].timer_fd);
    printf("Removed timer for packet %d\n",bp->ip.ap.str.id);
  }

  // 5. We free the buffer if it's a control packet and quit
  if (bp->ip.ap.str.flags & PKT_CONTROL) {
    mv_buffer_atof (app_ctx, &dp->idx);
    return;
  }

  // 6. Find its target
  sprintf(url, "udp:write:127.0.0.1:%d", bp->ip.ap.str.port);
  to_fdinfo = evt_core_get_from_url (ctx, url);
  if (to_fdinfo == NULL) {
    fprintf(stderr, "No fd for URL %s in udp:write for tcp-read. Dropping packet :( \n", url);
    //mv_buffer_wtor (app_ctx, fdinfo, bp);
    mv_buffer_atof (app_ctx, &dp->idx);
  }

  // 4. We move the buffer and notify the target
  //mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo, bp);
  mv_buffer_atow (app_ctx, &dp->idx, to_fdinfo);
  rr_on_udp_write(ctx, to_fdinfo);
}

void rr_pkt_unroll(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx) {
  struct rr_ctx* rr = app_ctx->misc;
  struct evt_core_fdinfo* fdinfo = NULL;
  struct buffer_packet* bp = NULL;

  while(1) {
    struct deferred_pkt* def = &rr->real[(rr->recv_id+1) % PACKET_BUFFER_SIZE];
    if (!def->on) break;
    fdinfo = evt_core_get_from_fd (ctx, def->link_fd);
    if (fdinfo == NULL) {
      fprintf(stderr, "An error occured as the link seems to be closed for the requested fd\n");
      rr->recv_id++;
      continue;
    }

    rr_deliver(ctx, fdinfo, def);
  }
}

//------

int rr_on_tcp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  int read_res = FDS_READY;

  // 1. Get current read buffer OR a new read buffer OR subscribe to be notified later
  if ((bp = get_read_buffer(app_ctx, fdinfo)) == NULL) return 1;

  // 2. Try to read a whole packet in the buffer
  while (bp->mode == BP_READING) {
    read_res = read_packet_from_tcp (fdinfo->fd, bp);
    if (read_res == FDS_ERR) goto co_error;
    if (read_res == FDS_AGAIN) return 1;
  }

  // 3. Logic on packet
  rr_pkt_register(ctx, fdinfo, bp);
  rr_pkt_unroll (ctx, app_ctx);

  return 0;
co_error:
  perror("Failed to TCP read");
  exit(EXIT_FAILURE);
}

int rr_on_udp_write (struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  int write_res = FDS_READY;

  // 1. Get current write buffer OR a buffer from the waiting queue OR leave
  if ((bp = get_write_buffer(app_ctx, fdinfo)) == NULL) return 1;

  // 2. Write buffer
  write_res = write_packet_to_udp(fdinfo->fd, bp, fdinfo->other);
  if (write_res == FDS_ERR) goto co_error;
  if (write_res == FDS_AGAIN) return 1;

  // 3. A whole packet has been written
  // Release the buffer and notify
  mv_buffer_wtof(app_ctx, fdinfo);
  notify_read(ctx, app_ctx);

  return 0;
co_error:
  perror("Failed to UDP write");
  exit(EXIT_FAILURE);
}

int rr_on_udp_read(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct evt_core_fdinfo *to_fdinfo = NULL;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;
  int read_res = FDS_READY;
  char url[255];

  // 1. Get current read buffer OR a new read buffer OR subscribe to be notified later
  if ((bp = get_read_buffer(app_ctx, fdinfo)) == NULL) return 1;

  // 2. Read packet from socket
  bp->ip.ap.str.port = url_get_port_int (fdinfo->url);
  read_res = read_packet_from_udp (fdinfo->fd, bp, fdinfo->other);
  if (read_res == FDS_ERR) goto co_error;
  if (read_res == FDS_AGAIN) return 1;

  // 3. Prepare RR state and packet values
  struct timespec curr;
  int secs, nsecs;
  uint64_t mili_sec;

  if (clock_gettime(CLOCK_MONOTONIC, &curr) == -1){
    perror("clock_gettime error");
    exit(EXIT_FAILURE);
  }

  secs = curr.tv_sec - rr->emit_time.tv_sec;
  nsecs = curr.tv_nsec - rr->emit_time.tv_nsec;
  mili_sec = secs * 1000 + nsecs / 1000000;
  if (mili_sec > rr->mjit) mili_sec = rr->mjit;

  bp->ip.ap.str.id = rr->sent_id;
  bp->ip.ap.str.flags = 0;
  bp->ip.ap.str.deltat = mili_sec;
  bp->ip.ap.str.bitfield = rr->remote_links;
  bp->ip.ap.str.prevlink = rr->current_link;

  int max = 10;
  while(1) {
    if (max-- < 0) break;
    rr->current_link = (rr->current_link + 1) % 10;
    if (!(rr->my_links & (1 << rr->current_link))) continue;
    sprintf(url, "tcp:write:127.0.0.1:%d", 7500 + rr->current_link); //@FIXME Hardcoded
    //printf("-- Trying %s\n", url);
    to_fdinfo = evt_core_get_from_url (ctx, url);
    if (to_fdinfo != NULL) {
      //printf("Selected url %s for pkt %d to be sent on Tor\n", url, bp->ip.ap.str.id);
      break;
    }
  }
  rr->emit_time = curr;
  rr->sent_id++;

  // 4. A whole packet has been read, we will find someone to write it
  if (to_fdinfo == NULL) {
    fprintf(stderr, "No fd for URL %s in udp-read. Dropping packet :( \n", fdinfo->url);
    mv_buffer_wtof (app_ctx, fdinfo);
    return 1;
  }
  //printf("Pass packet from %s to %s\n", fdinfo->url, url);

  // 5. We move the buffer and notify the target
  mv_buffer_rtow (app_ctx, fdinfo, to_fdinfo);
  rr_on_tcp_write(ctx, to_fdinfo);

  return 0;

co_error:
  perror("Failed to UDP read");
  exit(EXIT_FAILURE);
}
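Link numbering rests on a port convention used in both directions (and flagged @FIXME Hardcoded in the patch): the TCP writer for link n lives at 127.0.0.1:(7500 + n), and the link a packet arrived on is recovered as port - 7500. A small sketch of that mapping (URL format copied from the patch, helper names illustrative):

#include <stddef.h>
#include <stdio.h>

#define LINK_BASE_PORT 7500   /* hardcoded base port in this commit */

static void link_write_url(char* out, size_t n, int link) {
  snprintf(out, n, "tcp:write:127.0.0.1:%d", LINK_BASE_PORT + link);
}

static int link_from_port(int port) {
  return port - LINK_BASE_PORT;
}

int main(void) {
  char url[64];
  link_write_url(url, sizeof(url), 3);
  printf("%s <-> link %d\n", url, link_from_port(7503));
  return 0;
}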
int rr_on_tcp_write(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp;
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  int write_res = FDS_READY;

  // 1. Get current write buffer OR a buffer from the waiting queue OR leave
  if ((bp = get_write_buffer(app_ctx, fdinfo)) == NULL) return 1;

  // 2. Write data from the buffer to the socket
  while (bp->mode == BP_WRITING) {
    write_res = write_packet_to_tcp(fdinfo->fd, bp);
    if (write_res == FDS_ERR) goto co_error;
    if (write_res == FDS_AGAIN) return 1;
  }

  // 3. A whole packet has been written
  // Release the buffer and notify
  mv_buffer_wtof(app_ctx, fdinfo);
  notify_read(ctx, app_ctx);

  return 0;
co_error:
  perror("Failed to TCP write");
  exit(EXIT_FAILURE);
}

int rr_on_timer(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct rr_ctx* rr = app_ctx->misc;

  struct waited_pkt* pkt = fdinfo->other;
  evt_core_rm_fd(ctx, fdinfo->fd);
  pkt->on = 0;
  if (ring_lt(pkt->id, rr->recv_id)) return 1;

  printf("Timer reached for packet %d\n", pkt->id);

  // !BLACKLIST LINK
  //rr->remote_links &= 0xffff ^ 1 << pkt->link_num;

  while (ring_lt(rr->recv_id, pkt->id)) {
    rr->recv_id++;
    rr_pkt_unroll (ctx, app_ctx);
  }

  return 1;
}

int rr_on_err(struct evt_core_ctx* ctx, struct evt_core_fdinfo* fdinfo) {
  struct algo_ctx* app_ctx = fdinfo->cat->app_ctx;
  struct buffer_packet* bp;

  // 1. If has a "used" buffer, remove it
  bp = g_hash_table_lookup (app_ctx->used_buffer, &(fdinfo->fd));
  if (bp != NULL) {
    g_hash_table_remove (app_ctx->used_buffer, &(fdinfo->fd));
    memset(bp, 0, sizeof(struct buffer_packet));
    g_queue_push_tail(app_ctx->free_buffer, bp);
  }

  // 2. If appears in the write waiting queue, remove it
  GQueue* writew = g_hash_table_lookup (app_ctx->write_waiting, &(fdinfo->fd));
  while (writew != NULL && (bp = g_queue_pop_head (writew)) != NULL) {
    memset(bp, 0, sizeof(struct buffer_packet));
    g_queue_push_tail(app_ctx->free_buffer, bp);
  }
  g_hash_table_remove (app_ctx->write_waiting, &(fdinfo->fd));

  // 3. If appears in the read waiting queue, remove it
  g_queue_remove_all (app_ctx->read_waiting, &(fdinfo->fd));

  return 0;
}

void algo_rr(struct evt_core_ctx* evt, struct algo_skel* as) {
  struct algo_ctx* ctx = malloc(sizeof(struct algo_ctx));
  if (ctx == NULL) goto init_err;
  memset(ctx, 0, sizeof(struct algo_ctx));
  ctx->free_buffer = g_queue_new ();
  ctx->read_waiting = g_queue_new ();
  ctx->application_waiting = g_hash_table_new (NULL, NULL);
  ctx->used_buffer = g_hash_table_new(g_int_hash, g_int_equal);
  ctx->write_waiting = g_hash_table_new_full (g_int_hash, g_int_equal, NULL, naive_free_simple);
  struct rr_ctx* rr = malloc(sizeof(struct rr_ctx));
  if (rr == NULL) goto init_err;
  memset(rr, 0, sizeof(struct rr_ctx));
  rr->mjit = 200;
  rr->my_links = 0xff;
  rr->remote_links = 0xff;
  rr->sent_id = 1;
  ctx->misc = rr;
  for (int i = 0; i < sizeof(ctx->bps) / sizeof(ctx->bps[0]); i++) {
    g_queue_push_tail(ctx->free_buffer, &(ctx->bps[i]));
  }

  /*

  as->on_tcp_co.name = "tcp-listen";
  as->on_tcp_co.flags = EPOLLIN;
  as->on_tcp_co.free_app_ctx = free_nothing;
  as->on_tcp_co.cb = on_tcp_co;
  as->on_tcp_co.cb = rr_on_tcp_co;

  as->on_tcp_read.name = "tcp-read";
  as->on_tcp_read.flags = EPOLLIN | EPOLLET | EPOLLRDHUP;
  as->on_tcp_read.app_ctx = ctx;
  as->on_tcp_read.free_app_ctx = free_naive;
  as->on_tcp_read.cb = on_tcp_read;
  as->on_tcp_read.err_cb = on_err;
  as->on_tcp_read.cb = rr_on_tcp_read;
  as->on_tcp_read.err_cb = rr_on_err;
  ctx->ref_count++;

  as->on_udp_read.name = "udp-read";
  as->on_udp_read.flags = EPOLLIN | EPOLLET;
  as->on_udp_read.app_ctx = ctx;
  as->on_udp_read.free_app_ctx = free_naive;
  as->on_udp_read.cb = on_udp_read;
  as->on_udp_read.err_cb = on_err;
  as->on_udp_read.cb = rr_on_udp_read;
  as->on_udp_read.err_cb = rr_on_err;
  ctx->ref_count++;

  as->on_tcp_write.name = "tcp-write";
  as->on_tcp_write.flags = EPOLLOUT | EPOLLET | EPOLLRDHUP;
  as->on_tcp_write.app_ctx = ctx;
  as->on_tcp_write.free_app_ctx = free_naive;
  as->on_tcp_write.cb = on_tcp_write;
  as->on_tcp_write.err_cb = on_err;
  as->on_tcp_write.cb = rr_on_tcp_write;
  as->on_tcp_write.err_cb = rr_on_err;
  ctx->ref_count++;

  as->on_udp_write.name = "udp-write";
  as->on_udp_write.flags = EPOLLOUT | EPOLLET;
  as->on_udp_write.app_ctx = ctx;
  as->on_udp_write.free_app_ctx = free_naive;
  as->on_udp_write.cb = on_udp_write;
  as->on_udp_write.err_cb = on_err;
  as->on_udp_write.cb = rr_on_udp_write;
  as->on_udp_write.err_cb = rr_on_err;
  ctx->ref_count++;
  */

  struct evt_core_cat tcat = {
    .name = "timeout",
    .flags = EPOLLIN | EPOLLET,
    .app_ctx = ctx,
    .free_app_ctx = free_naive,
    .cb = rr_on_timer,
    .err_cb = NULL
  };
  ctx->ref_count++;
  evt_core_add_cat(evt, &tcat);

  return;
init_err:
  fprintf(stderr, "Failed to init algo naive\n");
@@ -1,9 +1,9 @@
 #include "algo_skel.h"
 
-void init_algo(struct algo_skel* as, char* name) {
+void init_algo(struct evt_core_ctx* ctx, struct algo_skel* as, char* name) {
   for (int i = 0; i < sizeof(available_algo) / sizeof(available_algo[0]); i++) {
     if (strcmp(available_algo[i].name, name) == 0) {
-      available_algo[i].init(as);
+      available_algo[i].init(ctx, as);
       return;
     }
   }
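Because an algorithm can now register extra event categories itself (algo_rr adds a "timeout" category bound to rr_on_timer), init_algo() receives the event-loop context and is called from inside donar_server()/donar_client() instead of main(). An illustrative call sequence mirroring the new donar_server.c below (include names assumed):

#include "evt_core.h"     /* assumed header names */
#include "algo_skel.h"

static void setup_algo(struct evt_core_ctx* evts, char* algoname) {
  struct algo_skel algo = {0};
  init_algo(evts, &algo, algoname);            /* e.g. "rr" or "naive", via available_algo */
  evt_core_add_cat(evts, &algo.on_tcp_co);
  evt_core_add_cat(evts, &algo.on_udp_read);
  evt_core_add_cat(evts, &algo.on_tcp_read);
  evt_core_add_cat(evts, &algo.on_udp_write);
  evt_core_add_cat(evts, &algo.on_tcp_write);
}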
src/algo_skel.h
@@ -16,10 +16,11 @@ struct algo_skel {
   struct evt_core_cat on_tcp_co;
 };
 
-typedef void (*algo_init)(struct algo_skel* as);
+typedef void (*algo_init)(struct evt_core_ctx* ctx, struct algo_skel* as);
 
-void init_algo(struct algo_skel* as, char* name);
-void algo_naive(struct algo_skel* as);
+void init_algo(struct evt_core_ctx* ctx, struct algo_skel* as, char* name);
+void algo_naive(struct evt_core_ctx* ctx, struct algo_skel* as);
+void algo_rr(struct evt_core_ctx* ctx, struct algo_skel* as);
 
 struct algo_desc {
   algo_init init;
@@ -30,5 +31,9 @@ static struct algo_desc available_algo[] = {
   {
     .init = algo_naive,
     .name = "naive"
   },
+  {
+    .init = algo_rr,
+    .name = "rr"
+  }
 };
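Registering another scheduler only requires an init function with the new algo_init signature and one more entry in available_algo; a hypothetical example (the "dup" algorithm below is purely illustrative and not part of this commit):

void algo_dup(struct evt_core_ctx* ctx, struct algo_skel* as);   /* hypothetical */

static struct algo_desc available_algo[] = {
  { .init = algo_naive, .name = "naive" },
  { .init = algo_rr,    .name = "rr"    },
  { .init = algo_dup,   .name = "dup"   }   /* hypothetical new entry */
};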
src/algo_utils.c (113 changed lines)
@@ -12,6 +12,15 @@ void free_naive(void* app_ctx) {
  free(ctx);
}

void iterate(int* fd, GQueue* q, int* waiting_count) {
  fprintf(stderr, "Queue for fd=%d has length=%d\n", *fd, q->length);
  waiting_count += q->length;
}

void iterate2(int* fd, struct buffer_packet *bp, gpointer user_data) {
  fprintf(stderr, "fd=%d has a used_buffer entry\n", *fd);
}

/**
 * Returns a buffer if available, NULL otherwise
 */
@@ -25,6 +34,16 @@ struct buffer_packet* get_read_buffer(struct algo_ctx *app_ctx, struct evt_core_fdinfo *fdinfo) {
  // 2. Get a new buffer otherwise
  bp = g_queue_pop_head(app_ctx->free_buffer);
  if (bp == NULL) {
    fprintf(stderr, "No more free buffer for fd=%d.\n", fdinfo->fd);
    int waiting_count = 0;
    g_hash_table_foreach(app_ctx->write_waiting, (GHFunc)iterate, &waiting_count);
    g_hash_table_foreach(app_ctx->used_buffer, (GHFunc)iterate2, NULL);
    fprintf(stderr, "total_buffers=%d, free_buffer=%d, used_buffers=%d, app_buffer=%d, write_buffer=%d.\n",
      PACKET_BUFFER_SIZE,
      app_ctx->free_buffer->length,
      g_hash_table_size(app_ctx->used_buffer),
      g_hash_table_size(app_ctx->application_waiting),
      waiting_count);
    // 2.1 If no buffer is available, we subscribe to be notified later
    g_queue_push_tail (app_ctx->read_waiting, &(fdinfo->fd));
    return NULL;
@@ -58,38 +77,108 @@ struct buffer_packet* get_write_buffer(struct algo_ctx *app_ctx, struct evt_core_fdinfo *fdinfo) {
  return bp;
}

void mv_buffer_rtow(struct algo_ctx* app_ctx,
                    struct evt_core_fdinfo* from,
                    struct evt_core_fdinfo* to,
                    struct buffer_packet* bp) {

  // 1. We get the target writing queue
void mv_buffer_rtow(struct algo_ctx* app_ctx, struct evt_core_fdinfo* from, struct evt_core_fdinfo* to) {
  GQueue* q;
  struct buffer_packet* bp;

  // 1. We get the packet buffer
  bp = g_hash_table_lookup (app_ctx->used_buffer, &from->fd);
  if (bp == NULL) {
    fprintf(stderr, "Unable to find a buffer for fd=%d url=%s", from->fd, from->url);
    exit(EXIT_FAILURE);
  }

  // 2. We get the target writing queue
  q = g_hash_table_lookup(app_ctx->write_waiting, &(to->fd));
  if (q == NULL) {
    q = g_queue_new ();
    g_hash_table_insert(app_ctx->write_waiting, &(to->fd), q);
  }

  // 2. We move the buffer to the target queue
  // 3. We move the data
  g_hash_table_remove(app_ctx->used_buffer, &from->fd);
  g_queue_push_tail(q, bp);
}

void mv_buffer_wtor(struct algo_ctx* app_ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp) {
void mv_buffer_wtof(struct algo_ctx* app_ctx, struct evt_core_fdinfo* fdinfo) {
  struct buffer_packet* bp = g_hash_table_lookup (app_ctx->used_buffer, &(fdinfo->fd));
  if (bp == NULL) {
    fprintf(stderr, "Unable to find a buffer for fd=%d url=%s", fdinfo->fd, fdinfo->url);
    exit(EXIT_FAILURE);
  }
  bp->mode = BP_READING;
  bp->aread = 0;
  g_queue_push_tail (app_ctx->free_buffer, bp);
  g_hash_table_remove(app_ctx->used_buffer, &(fdinfo->fd));
}

void mv_buffer_rtoa(struct algo_ctx* app_ctx, struct evt_core_fdinfo* from, void* to) {
  struct buffer_packet* bp;
  bp = g_hash_table_lookup (app_ctx->used_buffer, &from->fd);
  if (bp == NULL) {
    fprintf(stderr, "Unable to find a buffer for fd=%d url=%s\n", from->fd, from->url);
    exit(EXIT_FAILURE);
  }
  g_hash_table_remove(app_ctx->used_buffer, &from->fd);
  if (g_hash_table_contains(app_ctx->application_waiting, to)) {
    fprintf(stderr, "Data already exist for this entry\n");
    exit(EXIT_FAILURE);
  }
  g_hash_table_insert(app_ctx->application_waiting, to, bp);
}

void mv_buffer_atow(struct algo_ctx* app_ctx, void* from, struct evt_core_fdinfo* to) {
  GQueue* q;
  struct buffer_packet* bp;

  // 1. We get the buffer
  bp = g_hash_table_lookup (app_ctx->application_waiting, from);
  if (bp == NULL) {
    fprintf(stderr, "Unable to find this application buffer\n");
    exit(EXIT_FAILURE);
  }

  // 2. We get the target writing queue
  q = g_hash_table_lookup(app_ctx->write_waiting, &(to->fd));
  if (q == NULL) {
    q = g_queue_new ();
    g_hash_table_insert(app_ctx->write_waiting, &(to->fd), q);
  }

  // 3. We move the buffer
  g_hash_table_remove (app_ctx->application_waiting, from);
  g_queue_push_tail(q, bp);
}

void mv_buffer_atof(struct algo_ctx* app_ctx, void* from) {
  struct buffer_packet* bp;

  // 1. We get the buffer
  bp = g_hash_table_lookup (app_ctx->application_waiting, from);
  if (bp == NULL) {
    fprintf(stderr, "Unable to find this application buffer\n");
    exit(EXIT_FAILURE);
  }

  // 2. We move it
  g_hash_table_remove (app_ctx->application_waiting, from);
  g_queue_push_tail (app_ctx->free_buffer, bp);
}

struct buffer_packet* get_app_buffer(struct algo_ctx *app_ctx, void* idx) {
  return g_hash_table_lookup (app_ctx->application_waiting, idx);
}

void notify_read(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx) {
  struct evt_core_fdinfo* next_fdinfo = NULL;
  while (next_fdinfo == NULL) {
    int fd = GPOINTER_TO_INT(g_queue_pop_head(app_ctx->read_waiting));
    if (fd == 0) break;
    next_fdinfo = evt_core_get_from_fd (ctx, fd);
    if (strcmp(next_fdinfo->cat->name, "tcp-read") == 0 || strcmp(next_fdinfo->cat->name, "udp-read") == 0) {
    int* fd = g_queue_pop_head(app_ctx->read_waiting);
    if (fd == NULL) break;
    next_fdinfo = evt_core_get_from_fd (ctx, *fd);
    if (next_fdinfo == NULL) {
      fprintf(stderr, "Unable to find fdinfo for fd=%d\n", *fd);
      exit(EXIT_FAILURE);
    } else if (strcmp(next_fdinfo->cat->name, "tcp-read") == 0 || strcmp(next_fdinfo->cat->name, "udp-read") == 0) {
      next_fdinfo->cat->cb(ctx, next_fdinfo);
    } else {
      fprintf(stderr, "A fd from category %s can't be stored in read_waiting\n", next_fdinfo->cat->name);
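Read together, the mv_buffer_* helpers move a buffer_packet between four owners, and the letters in their names spell the source and destination: r = attached to a reading fd (used_buffer), w = queued for a writing fd (write_waiting, presumably picked back up into used_buffer by get_write_buffer, whose body is outside this hunk), a = parked by the algorithm (application_waiting, new in this commit), f = back on free_buffer. A summary of the moves as a C comment (function names from the patch, the diagram itself is an editor's sketch):

/*
 * free_buffer            --get_read_buffer()-->  used_buffer[read fd]      (r)
 * used_buffer[read fd]   --mv_buffer_rtow()--->  write_waiting[write fd]   (w)
 * used_buffer[read fd]   --mv_buffer_rtoa()--->  application_waiting[key]  (a)
 * application_waiting    --mv_buffer_atow()--->  write_waiting[write fd]   (w)
 * application_waiting    --mv_buffer_atof()--->  free_buffer               (f)
 * used_buffer[write fd]  --mv_buffer_wtof()--->  free_buffer               (f)
 */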
src/algo_utils.h
@@ -3,24 +3,34 @@
#include <glib-2.0/glib.h>
#include <glib-2.0/gmodule.h>
#include <glib-2.0/glib-object.h>
#define PACKET_BUFFER_SIZE 20

typedef void (*algo_ctx_free_misc)(void*);

struct algo_ctx {
  int ref_count;
  struct buffer_packet bps[10];
  GQueue* free_buffer;             // Available buffers
  GHashTable* used_buffer;         // Buffers used for reading or writing
  GQueue* read_waiting;            // Who wait to be notified for a read
  GHashTable* write_waiting;       // Structure to track packets waiting to be written
  struct buffer_packet bps[PACKET_BUFFER_SIZE];
  GQueue* free_buffer;             // Available buffers
  GHashTable* used_buffer;         // Buffers used for reading or writing
  GQueue* read_waiting;            // Who wait to be notified for a read
  GHashTable* application_waiting; // Structure that can be used by the algo for its internal logic
  GHashTable* write_waiting;       // Structure to track packets waiting to be written
  void* misc;                      // Additional structures
  algo_ctx_free_misc free_misc;    // Fx ptr to free misc
};

void mv_buffer_rtow(struct algo_ctx* app_ctx,
                    struct evt_core_fdinfo* from,
                    struct evt_core_fdinfo* to,
                    struct buffer_packet* bp);
void mv_buffer_wtor(struct algo_ctx* app_ctx, struct evt_core_fdinfo* fdinfo, struct buffer_packet* bp);
void mv_buffer_rtow(struct algo_ctx* app_ctx, struct evt_core_fdinfo* from, struct evt_core_fdinfo* to);
void mv_buffer_wtof(struct algo_ctx* app_ctx, struct evt_core_fdinfo* from);
void mv_buffer_rtoa(struct algo_ctx* app_ctx, struct evt_core_fdinfo* from, void* to);
void mv_buffer_atow(struct algo_ctx* app_ctx, void* from, struct evt_core_fdinfo* to);
void mv_buffer_atof(struct algo_ctx* app_ctx, void* from);

struct buffer_packet* get_write_buffer(struct algo_ctx *app_ctx, struct evt_core_fdinfo *fdinfo);
struct buffer_packet* get_read_buffer(struct algo_ctx *app_ctx, struct evt_core_fdinfo *fdinfo);
struct buffer_packet* get_app_buffer(struct algo_ctx *app_ctx, void* idx);

void notify_read(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx);

void free_naive(void* app_ctx);
void free_nothing(void* app_ctx);
void notify_read(struct evt_core_ctx* ctx, struct algo_ctx* app_ctx);
void naive_free_simple(void* v);
@@ -58,17 +58,14 @@ int main(int argc, char** argv) {
   if (!(is_server ^ is_client)) goto in_error;
   if (algo == NULL) goto in_error;
 
-  struct algo_skel as = {0};
-  init_algo(&as, algo);
-
   if (is_server) {
     struct donar_server_ctx ctx;
     if (exposed_ports->len < 1 && remote_ports->len < 1) goto in_error;
-    donar_server(&ctx, &as, exposed_ports, remote_ports);
+    donar_server(&ctx, algo, exposed_ports, remote_ports);
   } else if (is_client) {
     struct donar_client_ctx ctx;
     if ((exposed_ports->len < 1 && remote_ports->len < 1) || onion_file == NULL) goto in_error;
-    donar_client(&ctx, &as, onion_file, exposed_ports, remote_ports);
+    donar_client(&ctx, algo, onion_file, exposed_ports, remote_ports);
   }
   goto terminate;
 
src/donar_client.c
@@ -122,9 +122,12 @@ on_socks5_err:
   return 1;
 }
 
-void donar_client(struct donar_client_ctx* ctx, struct algo_skel* algo,
+void donar_client(struct donar_client_ctx* ctx, char* algoname,
                   char* onion_file, GPtrArray* exposed_ports, GPtrArray* remote_ports) {
+  struct algo_skel algo = {0};
+
   evt_core_init (&(ctx->evts));
+  init_algo(&ctx->evts, &algo, algoname);
   struct evt_core_cat init_socks5 = {
     .app_ctx = ctx,
     .free_app_ctx = NULL,
@@ -135,11 +138,11 @@ void donar_client(struct donar_client_ctx* ctx, struct algo_skel* algo,
     .socklist = NULL
   };
   evt_core_add_cat (&(ctx->evts), &init_socks5);
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_co));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_udp_read));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_read));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_udp_write));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_write));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_co));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_udp_read));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_read));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_udp_write));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_write));
   printf("--- Categories created\n");
 
   load_onion_services (ctx, onion_file, CLIENT_PORT_SIZE);
src/donar_client.h
@@ -19,5 +19,5 @@ struct donar_client_ctx {
   } client_sock[CLIENT_PORT_SIZE];
 };
 
-void donar_client(struct donar_client_ctx* ctx, struct algo_skel* as,
+void donar_client(struct donar_client_ctx* ctx, char* algoname,
                   char* onion_file, GPtrArray* exposed_ports, GPtrArray* remote_ports);
src/donar_server.c
@@ -51,14 +51,17 @@ socket_create_err:
   exit(EXIT_FAILURE);
 }
 
-void donar_server(struct donar_server_ctx* ctx, struct algo_skel* algo,
+void donar_server(struct donar_server_ctx* ctx, char* algoname,
                   GPtrArray* exposed_ports, GPtrArray* remote_ports) {
+  struct algo_skel algo = {0};
+
   evt_core_init (&(ctx->evts));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_co));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_udp_read));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_read));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_udp_write));
-  evt_core_add_cat (&(ctx->evts), &(algo->on_tcp_write));
+  init_algo(&ctx->evts, &algo, algoname);
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_co));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_udp_read));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_read));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_udp_write));
+  evt_core_add_cat (&(ctx->evts), &(algo.on_tcp_write));
 
   printf("--- Categories created\n");
 
src/donar_server.h
@@ -20,5 +20,5 @@ struct donar_server_ctx {
   uint16_t ports[PORT_SIZE];
 };
 
-void donar_server(struct donar_server_ctx* ctx, struct algo_skel* algo,
+void donar_server(struct donar_server_ctx* ctx, char* algoname,
                   GPtrArray* exposed_ports, GPtrArray* remote_ports);
src/evt_core.c
@@ -4,7 +4,7 @@ void free_fdinfo(void* v) {
   struct evt_core_fdinfo* fdinfo = (struct evt_core_fdinfo*)v;
   close(fdinfo->fd); // We close the file descriptor here
   if (fdinfo->url != NULL) free(fdinfo->url); // We free the URL here;
-  if (fdinfo->other != NULL) fdinfo->free_other(fdinfo->other);
+  if (fdinfo->free_other != NULL) fdinfo->free_other(fdinfo->other);
   free(v);
 }
 
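This guard matters for the new code path: set_timeout() stores a waited_pkt in fdinfo->other but leaves free_other NULL, so the old test (on the payload rather than on the destructor) would have called a NULL function pointer when such an fd is freed. A minimal sketch of the guarded-cleanup pattern (struct reduced to the two relevant fields, not code from the patch):

#include <stddef.h>

struct owner {
  void* other;                 /* optional payload, may be set without a destructor */
  void (*free_other)(void*);   /* optional destructor, may be NULL */
};

static void owner_cleanup(struct owner* o) {
  if (o->free_other != NULL) o->free_other(o->other);   /* only call it if one was registered */
}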
src/packet.h (10 changed lines)
@@ -28,12 +28,20 @@ enum BP_MODE {
   BP_WRITING
 };
 
+enum PKT_FLAGS {
+  PKT_CONTROL = 1 << 0
+};
+
 union abstract_packet {
   char raw;
   struct {
     uint16_t size;
     uint16_t port;
-    uint8_t id;
+    uint16_t id;
+    uint8_t bitfield;
+    uint8_t prevlink;
+    uint16_t deltat;
+    uint8_t flags;
     char payload;
   } str;
 };
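The header now carries everything the rr scheduler needs: a 16-bit id, the time since the previous send (deltat, in ms), which link that previous packet used (prevlink), the sender's view of working links (bitfield), and a flags byte whose only defined bit is PKT_CONTROL; rr_deliver() frees control packets instead of forwarding them to UDP. An illustrative way to stamp a control packet with the union above (assumed helper, not code from the patch):

#include <stdint.h>
#include "packet.h"   /* union abstract_packet, enum PKT_FLAGS */

static void stamp_control(union abstract_packet* pkt, uint16_t id) {
  pkt->str.id    = id;
  pkt->str.flags = PKT_CONTROL;   /* consumed by rr_deliver(), never written to a UDP socket */
}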
src/socks5.c
@@ -42,10 +42,7 @@ int socks5_reply(int sock) {
   int res;
   struct server_reply sr = {0};
   res = read_entity(sock, &sr, sizeof(uint8_t) * 4);
-  if (res == -1) {
-    perror("read_entity");
-    exit(EXIT_FAILURE);
-  }
+  if (res == -1) goto read_error;
 
   switch(sr.atyp) {
     case ATYP_IPV4:
src/utils.c (20 changed lines)
@@ -60,3 +60,23 @@ int ring_buffer_free_space(struct ring_buffer* rb) {
 int ring_buffer_used_space(struct ring_buffer* rb) {
   return RING_BUFFER_SIZE - ring_buffer_free_space (rb);
 }
+
+// Why we are using modulo, plus and modulo again:
+// https://stackoverflow.com/a/1907585
+int ring_ge(uint16_t v1, uint16_t v2) {
+  int64_t vv1 = (int64_t) v1, vv2 = (int64_t) v2;
+  return (((vv1 - vv2) % UINT16_MAX) + UINT16_MAX) % UINT16_MAX <= UINT16_MAX / 2;
+}
+
+int ring_gt(uint16_t v1, uint16_t v2) {
+  if (v1 == v2) return 0;
+  return ring_ge(v1,v2);
+}
+
+int ring_le(uint16_t v1, uint16_t v2) {
+  return ring_ge(v2, v1);
+}
+
+int ring_lt(uint16_t v1, uint16_t v2) {
+  return ring_gt(v2, v1);
+}
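These helpers compare 16-bit ids on a circle (serial-number arithmetic, cf. the Stack Overflow link above): v1 counts as greater than v2 when the forward distance from v2 to v1, taken modulo the id space, is at most half of it, so orderings survive the wrap at 65535. A short usage sketch:

#include <stdint.h>
#include <stdio.h>

int ring_gt(uint16_t v1, uint16_t v2);   /* from src/utils.c above */
int ring_lt(uint16_t v1, uint16_t v2);

int main(void) {
  printf("%d\n", ring_gt(10, 5));      /* 1: ordinary case                */
  printf("%d\n", ring_gt(2, 65534));   /* 1: 2 comes "after" the wrap     */
  printf("%d\n", ring_lt(65534, 2));   /* 1: same comparison, other side  */
  printf("%d\n", ring_gt(5, 5));       /* 0: equal ids are not greater    */
  return 0;
}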
src/utils.h
@@ -18,3 +18,8 @@ void ring_buffer_ack_read(struct ring_buffer* rb, int size);
 int ring_buffer_write(struct ring_buffer* rb, char* source, int size);
 int ring_buffer_free_space(struct ring_buffer* rb);
 int ring_buffer_used_space(struct ring_buffer* rb);
+
+int ring_gt(uint16_t v1, uint16_t v2);
+int ring_ge(uint16_t v1, uint16_t v2);
+int ring_lt(uint16_t v1, uint16_t v2);
+int ring_le(uint16_t v1, uint16_t v2);