[Aggregator] Use semaphores to lock APs' ring

The APs' ring is accessed concurrently by listen_for_aps() and
monitor_aps(), so we had to protect it with a lock.
Matteo Cypriani 2011-06-09 18:53:11 +02:00
parent d2559062e6
commit 533936c4ee
2 changed files with 14 additions and 4 deletions
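For context, the pattern this commit applies is a POSIX semaphore initialised to 1 and used as a binary lock around every access to the shared AP ring. Below is a minimal standalone sketch of that pattern; shared_counter, lock and worker are illustrative names, not the aggregator's actual code.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    /* Illustrative shared state (not the aggregator's real structures). */
    static long shared_counter = 0 ;
    static sem_t lock ;

    static void* worker(void *arg)
    {
        (void) arg ;
        for (int i = 0 ; i < 100000 ; ++i)
        {
            sem_wait(&lock) ;    // enter the critical section
            ++shared_counter ;   // access the shared data
            sem_post(&lock) ;    // leave the critical section
        }
        return NULL ;
    }

    int main(void)
    {
        sem_init(&lock, 0, 1) ;  // initial value 1 => binary lock (mutex-like)
        pthread_t t1, t2 ;
        pthread_create(&t1, NULL, worker, NULL) ;
        pthread_create(&t2, NULL, worker, NULL) ;
        pthread_join(t1, NULL) ;
        pthread_join(t2, NULL) ;
        sem_destroy(&lock) ;
        printf("%ld\n", shared_counter) ;
        return 0 ;
    }

Compiled with -pthread, this should always print 200000; without the sem_wait()/sem_post() pair, the two threads race on shared_counter and the result varies.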

TODO

@@ -30,9 +30,6 @@
 * Aggregator
-  - Use locks to read/write the AP list
-    Currently, several threads can access the list simultaneously, and
-    that's not cool!
   - inet_ntoa() is not secure with threads
     (But it is currently used by only one thread.) Use inet_ntop()
     instead?
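The TODO item kept above notes that inet_ntoa() is not thread-safe: it formats the address into a single static buffer shared by all callers in the process, whereas inet_ntop() writes into a buffer supplied by the caller, so each thread can use its own. A minimal sketch of the substitution, assuming an IPv4 address; the literal and variable names are only illustrative.

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        struct in_addr addr ;
        inet_pton(AF_INET, "192.168.1.42", &addr) ;

        /* Not thread-safe: the returned string lives in a static buffer
           shared by every caller in the process. */
        printf("inet_ntoa: %s\n", inet_ntoa(addr)) ;

        /* Thread-safe: the caller owns the output buffer. */
        char ip_str[INET_ADDRSTRLEN] ;
        if (inet_ntop(AF_INET, &addr, ip_str, sizeof(ip_str)) != NULL)
            printf("inet_ntop: %s\n", ip_str) ;

        return 0 ;
    }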


@@ -27,7 +27,8 @@ request_list *requests = NULL ; // Computed data list
 sem_t lock_requests ; // Semaphore to get access to the requests
 ap_list *token_aps = NULL ; // Token ring of the APs
-uint_fast16_t nb_aps = 0 ; // Number of APs in the AP ring
+uint_fast16_t nb_aps = 0 ; // Number of APs in the APs' ring
+sem_t lock_aps ; // Semaphore to get access to the APs' ring
@@ -55,6 +56,7 @@ int main(int argc, char **argv)
 /* Set up semaphores */
 sem_init(&lock_requests, 0, 1) ;
+sem_init(&lock_aps, 0, 1) ;
 /* Create UDP socket */
 listening_port = cfg_getint(cfg, "listening_port") ;
@@ -139,6 +141,7 @@ int main(int argc, char **argv)
 cfg_free(cfg) ; // Clean configuration
 // Destroy semaphores:
 sem_destroy(&lock_requests) ;
+sem_destroy(&lock_aps) ;
 fprintf(stderr, "%s: end.\n", program_name) ;
 return ret ;
@@ -894,7 +897,9 @@ void* listen_for_aps(void *NULL_value)
 ap_ip_addr) ;
 #endif // DEBUG
+sem_wait(&lock_aps) ;
 update_ap(message.ap_mac_addr_bytes, ap_ip_addr) ;
+sem_post(&lock_aps) ;
 }
 /* Close the socket */
@@ -1026,13 +1031,21 @@ void* monitor_aps(void *NULL_value)
 while (owl_run)
 {
+sem_wait(&lock_aps) ;
 delete_old_aps() ;
+sem_post(&lock_aps) ;
+// Here we're not in a hurry, so we released the semaphore to
+// allow listen_for_aps() to process a received hello packet,
+// if needed.
+sem_wait(&lock_aps) ;
 if (nb_aps > 1)
 {
 order_send(token_aps) ;
 token_aps = token_aps->next ;
 }
+sem_post(&lock_aps) ;
 usleep(cfg_getint(cfg, "ap_check_interval") * 1000) ;
 }