From 7bfc46db78857d0f68e19c909c17038f5f59cfa3 Mon Sep 17 00:00:00 2001
From: Thomas Preud'homme
Date: Tue, 30 Jun 2009 22:32:59 +0200
Subject: [PATCH] commtech: Remove the free_pages mechanism

Pages cannot be freed as fast as they are allocated, so this whole
mechanism can only delay the kernel panic. It is wiser to exit with an
error if too much memory is consumed.
---
 .../src/communication/jikes_barrier.c | 33 ++-----------------
 1 file changed, 3 insertions(+), 30 deletions(-)

diff --git a/communication_techniques/src/communication/jikes_barrier.c b/communication_techniques/src/communication/jikes_barrier.c
index 4c0c45a..e4f9942 100644
--- a/communication_techniques/src/communication/jikes_barrier.c
+++ b/communication_techniques/src/communication/jikes_barrier.c
@@ -30,7 +30,6 @@ static __thread void **local_tail = NULL;
 static __thread void **local_tail_buffer_end = NULL;
 static struct double_linked_list *global_tail = NULL;
 static struct double_linked_list *global_head = NULL;
-static int collect = 0;
 static int bufsenqueued = 0;
 static unsigned int lock = 0;
 
@@ -150,31 +149,10 @@ void closeAndEnqueueTail(int arity)
 
 void checkForAsyncCollection(void)
 {
-	if (bufsenqueued >= 102400) /* We use more than 100 Mo */
-		collect = 1;
-}
-
-void free_pages(struct double_linked_list *list_cur)
-{
-	if (list_cur == NULL)
-		return;
-	spin_lock(&lock);
-	/*
-	 * We may free some buffer allocated after this line is executed, thus
-	 * the real number of buffer enqueued could be bigger.
-	 * This is done to avoid to keep the lock during the while loop and for
-	 * checkForAsyncCollection to first start asking for a collection at
-	 * 100 Mo in the *worst* case
-	 */
-	bufsenqueued = 0;
-	spin_unlock(&lock);
-	while (global_head != list_cur) /* We know closeAndEnqueueTail() ask enqueue() to enqueue at tail */
+	if (bufsenqueued >= 262144) /* We use more than 1 Go */
 	{
-		void *buf_start;
-
-		buf_start = (void *) ((uintptr_t) global_head & BUFFER_MASK);
-		global_head = global_head->next;
-		free(buf_start);
+		fprintf(stderr, "We use 1.5 Go. Program terminated before kernel panic\n");
+		exit(1);
 	}
 }
 
@@ -215,11 +193,6 @@ void reception(void (*on_receive)(void *))
 			buf_start = (void **) (((uintptr_t) &list_cur->prev) & ~BUFFER_MASK);
 			for (buf_ptr = buf_start; buf_ptr != (void **) &list_cur->prev; buf_ptr++)
 				on_receive(*buf_ptr);
-			if (collect)
-			{
-				free_pages(list_cur);
-				collect = 0;
-			}
 		} while (cont && (list_cur->next == NULL));
 	} while (cont);
 }
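
Note for reviewers (not part of the patch): below is a minimal standalone
sketch of the bail-out policy the patch introduces, assuming bufsenqueued
simply counts enqueued buffers. The check_memory_budget() name and the
main() loop are hypothetical; the loop only simulates buffers being
enqueued faster than they could ever be freed, which is the situation the
commit message describes.

#include <stdio.h>
#include <stdlib.h>

/*
 * Counter of enqueued buffers. In the real code it is maintained by the
 * enqueue path; here it is only bumped by the simulation loop below.
 */
static int bufsenqueued = 0;

/*
 * Same policy as the patched checkForAsyncCollection(): once the counter
 * crosses a fixed threshold, give up and terminate instead of trying to
 * free buffers that are reallocated faster than they can be released.
 */
static void check_memory_budget(void)
{
	if (bufsenqueued >= 262144)
	{
		fprintf(stderr, "Too much memory consumed, terminating before kernel panic\n");
		exit(1);
	}
}

int main(void)
{
	for (;;)
	{
		bufsenqueued++;	/* hypothetical stand-in for a buffer allocation */
		check_memory_budget();
	}
}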