diff --git a/core/environment.c b/core/environment.c
index af192c09b..d24de4bfc 100644
--- a/core/environment.c
+++ b/core/environment.c
@@ -210,10 +210,16 @@ int environment_init(environment_t* env, const char* name, int id, int num_worke
                      const char* trace_file_name) {
   (void)trace_file_name; // Will be used with future enclave support.

-  env->name = malloc(strlen(name) + 1); // +1 for the null terminator
-  LF_ASSERT_NON_NULL(env->name);
-  strcpy(env->name, name);
-
+  // Space for the name string with the null terminator.
+  if (name != NULL) {
+    size_t name_size = strlen(name) + 1; // +1 for the null terminator
+    env->name = (char*)malloc(name_size);
+    LF_ASSERT_NON_NULL(env->name);
+    // Use strncpy rather than strcpy to avoid compiler warnings.
+    strncpy(env->name, name, name_size);
+  } else {
+    env->name = NULL;
+  }
   env->id = id;
   env->stop_tag = FOREVER_TAG;

@@ -284,3 +290,9 @@ int environment_init(environment_t* env, const char* name, int id, int num_worke
   env->initialized = true;
   return 0;
 }
+
+void environment_verify(environment_t* env) {
+  for (int i = 0; i < env->is_present_fields_size; i++) {
+    LF_ASSERT_NON_NULL(env->is_present_fields[i]);
+  }
+}
\ No newline at end of file
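Note on the environment.c change above: bounding the copy by the exact allocation size means strncpy cannot overrun, and a NULL name is no longer passed to strlen. A minimal standalone sketch of the same idiom (the helper name is hypothetical, not part of this patch):

#include <stdlib.h>
#include <string.h>

// Hypothetical helper showing the idiom: duplicate a possibly-NULL string,
// bounding the copy by the size of the new allocation.
static char* dup_name(const char* s) {
  if (s == NULL)
    return NULL;
  size_t size = strlen(s) + 1; // +1 for the null terminator.
  char* copy = (char*)malloc(size);
  if (copy == NULL)
    return NULL;
  // Because size includes the terminator of s, the copy is always terminated.
  strncpy(copy, s, size);
  return copy;
}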
diff --git a/core/federated/RTI/rti.Dockerfile b/core/federated/RTI/rti.Dockerfile
index 0103abde8..bbe4f9d97 100644
--- a/core/federated/RTI/rti.Dockerfile
+++ b/core/federated/RTI/rti.Dockerfile
@@ -1,5 +1,5 @@
 ARG BASEIMAGE=alpine:latest
-FROM ${BASEIMAGE} as builder
+FROM ${BASEIMAGE} AS builder
 COPY . /lingua-franca
 WORKDIR /lingua-franca/core/federated/RTI
 RUN set -ex && apk add --no-cache gcc musl-dev cmake make && \
@@ -12,7 +12,7 @@ RUN set -ex && apk add --no-cache gcc musl-dev cmake make && \
 WORKDIR /lingua-franca

 # application stage
-FROM ${BASEIMAGE} as app
+FROM ${BASEIMAGE} AS app
 LABEL maintainer="lf-lang"
 LABEL source="https://github.com/lf-lang/reactor-c/tree/main/core/federated/RTI"
 COPY --from=builder /usr/local/bin/RTI /usr/local/bin/RTI
diff --git a/core/lf_token.c b/core/lf_token.c
index 48913f85b..397f118c9 100644
--- a/core/lf_token.c
+++ b/core/lf_token.c
@@ -26,7 +26,20 @@ int _lf_count_token_allocations;
 #include "platform.h" // Enter/exit critical sections
 #include "port.h"     // Defines lf_port_base_t.

-lf_token_t* _lf_tokens_allocated_in_reactions = NULL;
+/**
+ * @brief List of tokens created within reactions that must be freed.
+ *
+ * Tokens created by lf_writable_copy, which is automatically invoked
+ * when an input is mutable, must have their reference count decremented
+ * at the end of a tag (or the beginning of the next tag).
+ * Otherwise, their memory could leak. If they are passed on to
+ * an output or to a call to lf_schedule during the reaction, then
+ * those will also result in incremented reference counts, enabling
+ * the token to live on until used. For example, a new token created
+ * by lf_writable_copy could become the new template token for an output
+ * via a call to lf_set.
+ */
+static lf_token_t* _lf_tokens_allocated_in_reactions = NULL;

 ////////////////////////////////////////////////////////////////////
 //// Global variables not visible outside this file.
@@ -197,6 +210,8 @@ lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length) {
     if (hashset_iterator_next(iterator) >= 0) {
       result = hashset_iterator_value(iterator);
       hashset_remove(_lf_token_recycling_bin, result);
+      // Make sure there isn't a previous value.
+      result->value = NULL;
       LF_PRINT_DEBUG("_lf_new_token: Retrieved token from the recycling bin: %p", (void*)result);
     }
     free(iterator);
@@ -352,8 +367,7 @@ token_freed _lf_done_using(lf_token_t* token) {

 void _lf_free_token_copies() {
   while (_lf_tokens_allocated_in_reactions != NULL) {
-    lf_token_t* next = _lf_tokens_allocated_in_reactions->next;
     _lf_done_using(_lf_tokens_allocated_in_reactions);
-    _lf_tokens_allocated_in_reactions = next;
+    _lf_tokens_allocated_in_reactions = _lf_tokens_allocated_in_reactions->next;
   }
 }
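Note on the lf_token.c recycling fix above: an object taken from a recycling bin must have its stale fields cleared before reuse, or the next owner can observe (and potentially double-free) the previous payload. The general pool pattern, sketched standalone with hypothetical names:

#include <stdlib.h>

typedef struct node {
  void* value;
  struct node* next;
} node_t;

static node_t* free_list = NULL; // Stand-in for the token recycling bin.

static node_t* pool_get(void) {
  if (free_list != NULL) {
    node_t* n = free_list;
    free_list = n->next;
    n->value = NULL; // Reset stale state before handing the node out.
    n->next = NULL;
    return n;
  }
  return (node_t*)calloc(1, sizeof(node_t)); // calloc zeroes fresh nodes.
}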
diff --git a/core/reactor_common.c b/core/reactor_common.c
index 83a1592b9..358480eb8 100644
--- a/core/reactor_common.c
+++ b/core/reactor_common.c
@@ -303,13 +303,19 @@ void _lf_pop_events(environment_t* env) {
       }
     }

-    // Mark the trigger present.
+    // Mark the trigger present
     event->trigger->status = present;

     // If the trigger is a periodic timer, create a new event for its next execution.
     if (event->trigger->is_timer && event->trigger->period > 0LL) {
       // Reschedule the trigger.
       lf_schedule_trigger(env, event->trigger, event->trigger->period, NULL);
+    } else {
+      // For actions, store a pointer to status field so it is reset later.
+      int ipfas = lf_atomic_fetch_add(&env->is_present_fields_abbreviated_size, 1);
+      if (ipfas < env->is_present_fields_size) {
+        env->is_present_fields_abbreviated[ipfas] = (bool*)&event->trigger->status;
+      }
     }

     // Copy the token pointer into the trigger struct so that the
@@ -323,9 +329,6 @@ void _lf_pop_events(environment_t* env) {
     // freed prematurely.
     _lf_done_using(token);

-    // Mark the trigger present.
-    event->trigger->status = present;
-
     lf_recycle_event(env, event);

     // Peek at the next event in the event queue.
@@ -383,12 +386,16 @@ void _lf_initialize_timer(environment_t* env, trigger_t* timer) {

   // Get an event_t struct to put on the event queue.
   // Recycle event_t structs, if possible.
-  event_t* e = lf_get_new_event(env);
-  e->trigger = timer;
-  e->base.tag = (tag_t){.time = lf_time_logical(env) + delay, .microstep = 0};
-  // NOTE: No lock is being held. Assuming this only happens at startup.
-  pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
-  tracepoint_schedule(env, timer, delay); // Trace even though schedule is not called.
+  tag_t next_tag = (tag_t){.time = lf_time_logical(env) + delay, .microstep = 0};
+  // Do not schedule the next event if it is after the timeout.
+  if (!lf_is_tag_after_stop_tag(env, next_tag)) {
+    event_t* e = lf_get_new_event(env);
+    e->trigger = timer;
+    e->base.tag = next_tag;
+    // NOTE: No lock is being held. Assuming this only happens at startup.
+    pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
+    tracepoint_schedule(env, timer, delay); // Trace even though schedule is not called.
+  }
 }

 void _lf_initialize_timers(environment_t* env) {
@@ -603,8 +610,12 @@ trigger_handle_t _lf_insert_reactions_for_trigger(environment_t* env, trigger_t*
   // for which we decrement the reference count.
   _lf_replace_template_token((token_template_t*)trigger, token);

-  // Mark the trigger present.
+  // Mark the trigger present and store a pointer to it for marking it as absent later.
   trigger->status = present;
+  int ipfas = lf_atomic_fetch_add(&env->is_present_fields_abbreviated_size, 1);
+  if (ipfas < env->is_present_fields_size) {
+    env->is_present_fields_abbreviated[ipfas] = (bool*)&trigger->status;
+  }

   // Push the corresponding reactions for this trigger
   // onto the reaction queue.
@@ -1096,6 +1107,13 @@ void initialize_global(void) {
   // Call the code-generated function to initialize all actions, timers, and ports
   // This is done for all environments/enclaves at the same time.
   _lf_initialize_trigger_objects();
+
+#if !defined(LF_SINGLE_THREADED) && !defined(NDEBUG)
+  // If we are testing, verify that environment with pointers is correctly set up.
+  for (int i = 0; i < num_envs; i++) {
+    environment_verify(&envs[i]);
+  }
+#endif
 }

 /**
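Note on the bookkeeping added in the reactor_common.c hunks above: the runtime atomically reserves a slot in a fixed-size array of status/is_present pointers so that, at the end of a tag, only the fields that were actually set need to be reset. A standalone sketch of that reserve-and-bulk-reset pattern, using C11 atomics rather than the platform layer:

#include <stdatomic.h>
#include <stdbool.h>

#define MAX_FIELDS 128

static bool* fields[MAX_FIELDS];
static atomic_int fields_size = 0;

// Reserve a slot and record a pointer to a status field. If the array
// overflows, callers must fall back to scanning every field.
static void record_present_field(bool* field) {
  int slot = atomic_fetch_add(&fields_size, 1);
  if (slot < MAX_FIELDS)
    fields[slot] = field;
}

// At the end of a tag, reset only the recorded fields.
static void reset_present_fields(void) {
  int n = atomic_load(&fields_size);
  if (n > MAX_FIELDS)
    n = MAX_FIELDS; // Overflowed; a full scan would be needed instead.
  for (int i = 0; i < n; i++)
    *fields[i] = false;
  atomic_store(&fields_size, 0);
}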
diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c
index 56a53cffa..82dd2b648 100644
--- a/core/threaded/reactor_threaded.c
+++ b/core/threaded/reactor_threaded.c
@@ -176,7 +176,7 @@ void lf_set_present(lf_port_base_t* port) {
     return;
   environment_t* env = port->source_reactor->environment;
   bool* is_present_field = &port->is_present;
-  int ipfas = lf_atomic_fetch_add32(&env->is_present_fields_abbreviated_size, 1);
+  int ipfas = lf_atomic_fetch_add(&env->is_present_fields_abbreviated_size, 1);
   if (ipfas < env->is_present_fields_size) {
     env->is_present_fields_abbreviated[ipfas] = is_present_field;
   }
@@ -184,7 +184,7 @@ void lf_set_present(lf_port_base_t* port) {

   // Support for sparse destination multiports.
   if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) {
-    size_t next = (size_t)lf_atomic_fetch_add32(&port->sparse_record->size, 1);
+    size_t next = (size_t)lf_atomic_fetch_add(&port->sparse_record->size, 1);
     if (next >= port->sparse_record->capacity) {
       // Buffer is full. Have to revert to the classic iteration.
       port->sparse_record->size = -1;
@@ -1023,13 +1023,17 @@ int lf_reactor_c_main(int argc, const char* argv[]) {
 #endif
   LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time);

-  struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};
 #ifdef MINIMAL_STDLIB
   lf_print("---- Start execution ----");
 #else
-  lf_print("---- Start execution at time %s---- plus %ld nanoseconds", ctime(&physical_time_timespec.tv_sec),
-           physical_time_timespec.tv_nsec);
+  struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};
+  struct tm* time_info = localtime(&physical_time_timespec.tv_sec);
+  char buffer[80]; // Long enough to hold the formatted time string.
+  // Use strftime rather than ctime because as of C23, ctime is deprecated.
+  strftime(buffer, sizeof(buffer), "%a %b %d %H:%M:%S %Y", time_info);
+
+  lf_print("---- Start execution on %s ---- plus %ld nanoseconds", buffer, physical_time_timespec.tv_nsec);
 #endif // MINIMAL_STDLIB

   // Create and initialize the environments for each enclave
@@ -1114,6 +1118,8 @@ int lf_reactor_c_main(int argc, const char* argv[]) {
       } else {
         int failure = lf_thread_join(env->thread_ids[j], &worker_thread_exit_status);
         if (failure) {
+          // Windows warns that strerror is deprecated but doesn't define strerror_r.
+          // There seems to be no portable replacement.
           lf_print_error("Failed to join thread listening for incoming messages: %s", strerror(failure));
         }
       }
diff --git a/core/threaded/scheduler_GEDF_NP.c b/core/threaded/scheduler_GEDF_NP.c
index e77257209..84afee379 100644
--- a/core/threaded/scheduler_GEDF_NP.c
+++ b/core/threaded/scheduler_GEDF_NP.c
@@ -228,14 +228,14 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu

 void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
   (void)worker_number; // Suppress unused parameter warning.
-  if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
+  if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) {
     lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
   }
 }

 void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
   (void)worker_number; // Suppress unused parameter warning.
-  if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
+  if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) {
     return;
   }
   LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index));
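Note on the compare-and-swap guards in the schedulers above: every reaction status transition is gated by a CAS so the same reaction is never enqueued or retired twice under concurrency. A sketch of that state machine with C11 atomics (the two-state enum is a simplification; the real reaction_status_t is analogous):

#include <stdatomic.h>
#include <stdbool.h>

typedef enum { INACTIVE, QUEUED } status_t;

// Enqueue wins only if it performs the INACTIVE -> QUEUED transition;
// a second concurrent attempt observes QUEUED and returns false.
static bool try_enqueue(atomic_int* status) {
  int expected = INACTIVE;
  return atomic_compare_exchange_strong(status, &expected, QUEUED);
}

// Completion must observe QUEUED -> INACTIVE, or the bookkeeping is corrupt,
// which is why the schedulers treat a failed CAS here as a fatal error.
static bool try_retire(atomic_int* status) {
  int expected = QUEUED;
  return atomic_compare_exchange_strong(status, &expected, INACTIVE);
}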
diff --git a/core/threaded/scheduler_NP.c b/core/threaded/scheduler_NP.c
index 54a611ea8..fd0ccfb04 100644
--- a/core/threaded/scheduler_NP.c
+++ b/core/threaded/scheduler_NP.c
@@ -77,7 +77,7 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction
     scheduler->indexes[reaction_level] = 0;
   }
 #endif
-  int reaction_q_level_index = lf_atomic_fetch_add32((int32_t*)&scheduler->indexes[reaction_level], 1);
+  int reaction_q_level_index = lf_atomic_fetch_add((int*)&scheduler->indexes[reaction_level], 1);
   assert(reaction_q_level_index >= 0);
   LF_PRINT_DEBUG("Scheduler: Accessing triggered reactions at the level %zu with index %d.", reaction_level,
                  reaction_q_level_index);
@@ -203,7 +203,7 @@ static void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* schedul
 static void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) {
   // Increment the number of idle workers by 1 and check if this is the last
   // worker thread to become idle.
-  if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) {
+  if (lf_atomic_add_fetch((int*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) {
     // Last thread to go idle
     LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number);
     // Call on the scheduler to distribute work or advance tag.
@@ -322,7 +322,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
     // the current level (if there is a causality loop)
     LF_MUTEX_LOCK(&scheduler->custom_data->array_of_mutexes[current_level]);
 #endif
-    int current_level_q_index = lf_atomic_add_fetch32((int32_t*)&scheduler->indexes[current_level], -1);
+    int current_level_q_index = lf_atomic_add_fetch((int*)&scheduler->indexes[current_level], -1);
     if (current_level_q_index >= 0) {
       LF_PRINT_DEBUG("Scheduler: Worker %d popping reaction with level %zu, index "
                      "for level: %d.",
@@ -361,7 +361,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
  */
 void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
   (void)worker_number;
-  if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
+  if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) {
     lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
   }
 }
@@ -388,7 +388,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction
 void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
   (void)worker_number;
-  if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
+  if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) {
     return;
   }
   LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index));
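Note on the scheduler_NP.c call sites above: indexes[level] acts as a cursor into a per-level reaction array. Producers claim slots with fetch-add(+1); consumers pop with add-fetch(-1). In the real scheduler, insertion and consumption are separated by level barriers, so a slot is never read before it is written. The discipline, isolated:

#include <stdatomic.h>
#include <stddef.h>

#define LEVEL_CAPACITY 64

static void* level_queue[LEVEL_CAPACITY];
static atomic_int level_index = 0; // Number of reactions queued at this level.

// Producer side: claim the next free slot, as in _lf_sched_insert_reaction.
static void insert(void* reaction) {
  int slot = atomic_fetch_add(&level_index, 1);
  if (slot < LEVEL_CAPACITY)
    level_queue[slot] = reaction;
}

// Consumer side: decrement, then read, as in lf_sched_get_ready_reaction;
// a negative index means the level is exhausted.
static void* pop(void) {
  int slot = atomic_fetch_add(&level_index, -1) - 1; // Value after the decrement.
  if (slot < 0)
    return NULL; // Empty; the scheduler resets the index before reuse.
  return level_queue[slot];
}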
diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c
index 1f90c90a6..5a926aba6 100644
--- a/core/threaded/scheduler_adaptive.c
+++ b/core/threaded/scheduler_adaptive.c
@@ -207,7 +207,7 @@ static void worker_assignments_free(lf_scheduler_t* scheduler) {
 static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) {
   worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
 #ifndef FEDERATED
-  int index = lf_atomic_add_fetch32((int32_t*)(worker_assignments->num_reactions_by_worker + worker), -1);
+  int index = lf_atomic_add_fetch(worker_assignments->num_reactions_by_worker + worker, -1);
   if (index >= 0) {
     return worker_assignments->reactions_by_worker[worker][index];
   }
@@ -223,9 +223,9 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) {
     old_num_reactions = current_num_reactions;
     if (old_num_reactions <= 0)
       return NULL;
-  } while ((current_num_reactions = lf_atomic_val_compare_and_swap32(
-                (int32_t*)(worker_assignments->num_reactions_by_worker + worker), old_num_reactions,
-                (index = old_num_reactions - 1))) != old_num_reactions);
+  } while ((current_num_reactions =
+                lf_atomic_val_compare_and_swap(worker_assignments->num_reactions_by_worker + worker, old_num_reactions,
+                                               (index = old_num_reactions - 1))) != old_num_reactions);
   return worker_assignments->reactions_by_worker[worker][index];
 #endif
 }
@@ -282,7 +282,7 @@ static void worker_assignments_put(lf_scheduler_t* scheduler, reaction_t* reacti
   hash = hash ^ (hash >> 31);
   size_t worker = hash % worker_assignments->num_workers_by_level[level];
   size_t num_preceding_reactions =
-      lf_atomic_fetch_add32((int32_t*)&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1);
+      lf_atomic_fetch_add(&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1);
   worker_assignments->reactions_by_worker_by_level[level][worker][num_preceding_reactions] = reaction;
 }
@@ -383,7 +383,7 @@ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler,
   assert(((int64_t)worker_assignments->num_reactions_by_worker[worker]) <= 0);
   // Why use an atomic operation when we are supposed to be "as good as locked"? Because I took a
   // shortcut, and the shortcut was imperfect.
-  size_t ret = lf_atomic_add_fetch32((int32_t*)&worker_states->num_loose_threads, -1);
+  size_t ret = lf_atomic_add_fetch(&worker_states->num_loose_threads, -1);
   assert(ret <= worker_assignments->max_num_workers); // Check for underflow
   return !ret;
 }
@@ -726,7 +726,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction

 void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
   LF_ASSERT(worker_number >= -1, "Sched: Invalid worker number");
-  if (!lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued))
+  if (!lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued))
     return;
   worker_assignments_put(scheduler, reaction);
 }
diff --git a/core/utils/lf_semaphore.c b/core/utils/lf_semaphore.c
index 2d0255ecb..3d79f9e4c 100644
--- a/core/utils/lf_semaphore.c
+++ b/core/utils/lf_semaphore.c
@@ -41,7 +41,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * @param count The count to start with.
  * @return lf_semaphore_t* Can be NULL on error.
  */
-lf_semaphore_t* lf_semaphore_new(int count) {
+lf_semaphore_t* lf_semaphore_new(size_t count) {
   lf_semaphore_t* semaphore = (lf_semaphore_t*)malloc(sizeof(lf_semaphore_t));
   LF_MUTEX_INIT(&semaphore->mutex);
   LF_COND_INIT(&semaphore->cond, &semaphore->mutex);
@@ -55,7 +55,7 @@ lf_semaphore_t* lf_semaphore_new(int count) {
  * @param semaphore Instance of a semaphore
  * @param i The count to add.
  */
-void lf_semaphore_release(lf_semaphore_t* semaphore, int i) {
+void lf_semaphore_release(lf_semaphore_t* semaphore, size_t i) {
   assert(semaphore != NULL);
   LF_MUTEX_LOCK(&semaphore->mutex);
   semaphore->count += i;
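Note on the lf_semaphore.c signature change above (the matching header change appears further below): with count as size_t, the semaphore can no longer represent a negative count, which matches its use, since acquire blocks at zero. A minimal pthread-based sketch of the same mutex-plus-condition-variable semaphore:

#include <pthread.h>
#include <stddef.h>

typedef struct {
  size_t count;
  pthread_mutex_t mutex;
  pthread_cond_t cond;
} semaphore_t;

static void semaphore_release(semaphore_t* s, size_t i) {
  pthread_mutex_lock(&s->mutex);
  s->count += i;
  pthread_cond_broadcast(&s->cond); // Wake waiters; each re-checks the count.
  pthread_mutex_unlock(&s->mutex);
}

static void semaphore_acquire(semaphore_t* s) {
  pthread_mutex_lock(&s->mutex);
  while (s->count == 0) // Loop to tolerate spurious wakeups.
    pthread_cond_wait(&s->cond, &s->mutex);
  s->count--;
  pthread_mutex_unlock(&s->mutex);
}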
diff --git a/core/utils/util.c b/core/utils/util.c
index 62de9fd27..4fb956b3a 100644
--- a/core/utils/util.c
+++ b/core/utils/util.c
@@ -206,6 +206,8 @@ void lf_print_error_system_failure(const char* format, ...) {
   va_start(args, format);
   lf_vprint_error(format, args);
   va_end(args);
+  // Windows warns that strerror is deprecated but doesn't define strerror_r.
+  // There seems to be no portable replacement.
   lf_print_error_and_exit("Error %d: %s", errno, strerror(errno));
   exit(EXIT_FAILURE);
 }
diff --git a/include/core/environment.h b/include/core/environment.h
index 8099eed26..9f3960a6f 100644
--- a/include/core/environment.h
+++ b/include/core/environment.h
@@ -112,6 +112,13 @@ int environment_init(environment_t* env, const char* name, int id, int num_worke
                      int num_is_present_fields, int num_modes, int num_state_resets, int num_watchdogs,
                      const char* trace_file_name);

+/**
+ * @brief Verify that the environment is correctly set up.
+ *
+ * @param env
+ */
+void environment_verify(environment_t* env);
+
 /**
  * @brief Free the dynamically allocated memory on the environment struct.
  * @param env The environment in which we are executing.
diff --git a/include/core/lf_token.h b/include/core/lf_token.h
index 219538dd3..49069fa95 100644
--- a/include/core/lf_token.h
+++ b/include/core/lf_token.h
@@ -108,9 +108,9 @@ typedef struct lf_token_t {
  * A record of the subset of channels of a multiport that have present inputs.
  */
 typedef struct lf_sparse_io_record_t {
-  int size;                  // -1 if overflowed. 0 if empty.
-  size_t capacity;           // Max number of writes to be considered sparse.
-  size_t present_channels[]; // Array of channel indices that are present.
+  int size;                 // -1 if overflowed. 0 if empty.
+  size_t capacity;          // Max number of writes to be considered sparse.
+  size_t* present_channels; // Array of channel indices that are present.
 } lf_sparse_io_record_t;

 /**
@@ -151,20 +151,6 @@ typedef struct lf_port_base_t {
 //////////////////////////////////////////////////////////
 //// Global variables

-/**
- * @brief List of tokens created within reactions that must be freed.
- * Tokens created by lf_writable_copy, which is automatically invoked
- * when an input is mutable, must have their reference count decremented
- * at the end of a tag (or the beginning of the next tag).
- * Otherwise, their memory could leak. If they are passed on to
- * an output or to a call to lf_schedule during the reaction, then
- * those will also result in incremented reference counts, enabling
- * the token to live on until used. For example, a new token created
- * by lf_writable_copy could become the new template token for an output
- * via a call to lf_set.
- */
-extern lf_token_t* _lf_tokens_allocated_in_reactions;
-
 /**
  * Counter used to issue a warning if memory is
  * allocated for tokens and never freed. Note that
diff --git a/include/core/utils/lf_semaphore.h b/include/core/utils/lf_semaphore.h
index 73d3e4eb4..341c43cc4 100644
--- a/include/core/utils/lf_semaphore.h
+++ b/include/core/utils/lf_semaphore.h
@@ -41,7 +41,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "low_level_platform.h"

 typedef struct {
-  int count;
+  size_t count;
   lf_mutex_t mutex;
   lf_cond_t cond;
 } lf_semaphore_t;
@@ -52,7 +52,7 @@ typedef struct {
  * @param count The count to start with.
  * @return lf_semaphore_t* Can be NULL on error.
  */
-lf_semaphore_t* lf_semaphore_new(int count);
+lf_semaphore_t* lf_semaphore_new(size_t count);

 /**
  * @brief Release the 'semaphore' and add 'i' to its count.
@@ -60,7 +60,7 @@ lf_semaphore_t* lf_semaphore_new(int count);
  * @param semaphore Instance of a semaphore
  * @param i The count to add.
  */
-void lf_semaphore_release(lf_semaphore_t* semaphore, int i);
+void lf_semaphore_release(lf_semaphore_t* semaphore, size_t i);

 /**
  * @brief Acquire the 'semaphore'. Will block if count is 0.
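Note on the lf_token.h change above: present_channels switches from a C99 flexible array member to a plain pointer, so the index array is now a separate allocation from the record itself. A sketch of the pointer-member style, with hypothetical helper names:

#include <stdlib.h>

typedef struct {
  int size;
  size_t capacity;
  size_t* present_channels; // Separately allocated array of channel indices.
} sparse_record_t;

// With a pointer member, the record and its array are two allocations,
// and the array can be resized without moving the record.
static sparse_record_t* sparse_record_new(size_t capacity) {
  sparse_record_t* r = (sparse_record_t*)calloc(1, sizeof(sparse_record_t));
  if (r == NULL)
    return NULL;
  r->present_channels = (size_t*)calloc(capacity, sizeof(size_t));
  if (r->present_channels == NULL) {
    free(r);
    return NULL;
  }
  r->capacity = capacity;
  return r;
}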
diff --git a/logging/api/logging_macros.h b/logging/api/logging_macros.h
index 6f7ea1eba..3e22950b5 100644
--- a/logging/api/logging_macros.h
+++ b/logging/api/logging_macros.h
@@ -1,3 +1,5 @@
+#ifndef LOGGING_MACROS_H
+#define LOGGING_MACROS_H
 #include "logging.h"

 /**
@@ -12,6 +14,11 @@
 #define LOG_LEVEL LOG_LEVEL_INFO
 #endif

+// To prevent warnings "conditional expression is constant", we define static booleans
+// here instead of directly testing LOG_LEVEL in the if statements in the macros below.
+static const bool _lf_log_level_is_log = LOG_LEVEL >= LOG_LEVEL_LOG;
+static const bool _lf_log_level_is_debug = LOG_LEVEL >= LOG_LEVEL_DEBUG;
+
 /**
  * A macro used to print useful logging information. It can be enabled
  * by setting the target property 'logging' to 'LOG' or
@@ -31,7 +38,7 @@
  */
 #define LF_PRINT_LOG(format, ...) \
   do { \
-    if (LOG_LEVEL >= LOG_LEVEL_LOG) { \
+    if (_lf_log_level_is_log) { \
       lf_print_log(format, ##__VA_ARGS__); \
     } \
   } while (0)
@@ -54,7 +61,7 @@
  */
 #define LF_PRINT_DEBUG(format, ...) \
   do { \
-    if (LOG_LEVEL >= LOG_LEVEL_DEBUG) { \
+    if (_lf_log_level_is_debug) { \
       lf_print_debug(format, ##__VA_ARGS__); \
     } \
   } while (0)
@@ -100,3 +107,4 @@
     } \
   } while (0)
 #endif // NDEBUG
+#endif // LOGGING_MACROS_H
\ No newline at end of file
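Note on the logging_macros.h change above: routing the constant comparison through a static const bool avoids "conditional expression is constant" warnings on some compilers, while optimizers still constant-fold the branch away, so disabled log calls cost nothing. A compressed, self-contained illustration:

#include <stdbool.h>
#include <stdio.h>

#define LOG_LEVEL 2
#define LOG_LEVEL_DEBUG 4

// Testing the macro comparison directly inside an if can trigger
// constant-condition warnings; a named boolean does not.
static const bool log_debug_enabled = LOG_LEVEL >= LOG_LEVEL_DEBUG;

#define PRINT_DEBUG(...) \
  do { \
    if (log_debug_enabled) { /* Constant-folded away when disabled. */ \
      printf(__VA_ARGS__); \
    } \
  } while (0)

int main(void) {
  PRINT_DEBUG("debug output: %d\n", 42); // Compiled out at this LOG_LEVEL.
  return 0;
}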
diff --git a/low_level_platform/api/platform/lf_atomic.h b/low_level_platform/api/platform/lf_atomic.h
index 391678293..e40de9b25 100644
--- a/low_level_platform/api/platform/lf_atomic.h
+++ b/low_level_platform/api/platform/lf_atomic.h
@@ -11,14 +11,14 @@
 #include <stdint.h>

 /**
- * @brief Atomically fetch a 32bit integer from memory and add a value to it.
+ * @brief Atomically fetch an integer from memory and add a value to it.
  * Return the value that was previously in memory.
  *
  * @param ptr A pointer to the memory location.
  * @param val The value to be added.
  * @return The value previously in memory.
  */
-int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val);
+int lf_atomic_fetch_add(int* ptr, int val);

 /**
  * @brief Atomically fetch 64-bit integer from memory and add a value to it.
@@ -31,14 +31,14 @@ int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val);
 int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t val);

 /**
- * @brief Atomically fetch a 32-bit integer from memory and add a value to it.
+ * @brief Atomically fetch an integer from memory and add a value to it.
  * Return the new value of the memory.
  *
  * @param ptr A pointer to the memory location.
  * @param val The value to be added.
  * @return The new value in memory.
  */
-int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t val);
+int lf_atomic_add_fetch(int* ptr, int val);

 /**
  * @brief Atomically fetch a 64-bit integer from memory and add a value to it.
@@ -60,7 +60,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t val);
  * @param newval The value to swap in.
  * @return Whether a swap was performed or not.
  */
-bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval);
+bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval);

 /**
  * @brief Atomically perform a compare-and-swap operation on a 64 bit integer in
@@ -75,7 +75,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
 bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval);

 /**
- * @brief Atomically perform a compare-and-swap operation on a 32 bit integer in
+ * @brief Atomically perform a compare-and-swap operation on an integer in
  * memory. If the value in memory is equal to `oldval` replace it with `newval`.
  * Return the content of the memory before the potential swap operation is
  * performed.
@@ -85,7 +85,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
  * @param newval The value to swap in.
  * @return The value in memory prior to the swap.
  */
-int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval);
+int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval);

 /**
  * @brief Atomically perform a compare-and-swap operation on a 64 bit integer in
diff --git a/low_level_platform/api/platform/lf_zephyr_support.h b/low_level_platform/api/platform/lf_zephyr_support.h
index 724bbe4e5..44d91bcbd 100644
--- a/low_level_platform/api/platform/lf_zephyr_support.h
+++ b/low_level_platform/api/platform/lf_zephyr_support.h
@@ -50,6 +50,8 @@ typedef struct {
 } lf_cond_t;
 typedef struct k_thread* lf_thread_t;

+void _lf_initialize_clock_zephyr_common();
+
 #endif // !LF_SINGLE_THREADED

 #endif // LF_ZEPHYR_SUPPORT_H
diff --git a/low_level_platform/impl/src/lf_atomic_gcc_clang.c b/low_level_platform/impl/src/lf_atomic_gcc_clang.c
index 30d671a8a..bca144459 100644
--- a/low_level_platform/impl/src/lf_atomic_gcc_clang.c
+++ b/low_level_platform/impl/src/lf_atomic_gcc_clang.c
@@ -11,17 +11,17 @@
 #include "platform/lf_atomic.h"
 #include "low_level_platform.h"

-int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return __sync_fetch_and_add(ptr, value); }
+int lf_atomic_fetch_add(int* ptr, int value) { return __sync_fetch_and_add(ptr, value); }
 int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return __sync_fetch_and_add(ptr, value); }
-int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return __sync_add_and_fetch(ptr, value); }
+int lf_atomic_add_fetch(int* ptr, int value) { return __sync_add_and_fetch(ptr, value); }
 int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return __sync_add_and_fetch(ptr, value); }

-bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
   return __sync_bool_compare_and_swap(ptr, oldval, newval);
 }
 bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
   return __sync_bool_compare_and_swap(ptr, oldval, newval);
 }
-int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
   return __sync_val_compare_and_swap(ptr, oldval, newval);
 }
 int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
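Note on the GCC/Clang backend above: the __sync builtins are type-generic, which is why the same builtin serves both the int and the int64_t entry points. A self-contained check of the fetch-add versus add-fetch semantics the renamed API distinguishes (compiles with GCC or Clang):

#include <assert.h>

int main(void) {
  int counter = 10;
  // Fetch-add returns the value *before* the addition...
  int before = __sync_fetch_and_add(&counter, 5);
  assert(before == 10 && counter == 15);
  // ...while add-fetch returns the value *after* it.
  int after = __sync_add_and_fetch(&counter, 5);
  assert(after == 20 && counter == 20);
  // Value CAS returns the prior contents whether or not it swapped.
  int prior = __sync_val_compare_and_swap(&counter, 20, 0);
  assert(prior == 20 && counter == 0);
  return 0;
}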
diff --git a/low_level_platform/impl/src/lf_atomic_irq.c b/low_level_platform/impl/src/lf_atomic_irq.c
index 3ac073e91..9bcf6bb0f 100644
--- a/low_level_platform/impl/src/lf_atomic_irq.c
+++ b/low_level_platform/impl/src/lf_atomic_irq.c
@@ -17,9 +17,9 @@
 int lf_disable_interrupts_nested();
 int lf_enable_interrupts_nested();

-int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) {
+int lf_atomic_fetch_add(int* ptr, int value) {
   lf_disable_interrupts_nested();
-  int32_t res = *ptr;
+  int res = *ptr;
   *ptr += value;
   lf_enable_interrupts_nested();
   return res;
@@ -33,7 +33,7 @@ int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) {
   return res;
 }

-int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) {
+int lf_atomic_add_fetch(int* ptr, int value) {
   lf_disable_interrupts_nested();
   int res = *ptr + value;
   *ptr = res;
@@ -49,7 +49,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) {
   return res;
 }

-bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
   lf_disable_interrupts_nested();
   bool res = false;
   if ((*ptr) == oldval) {
@@ -71,7 +71,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
   return res;
 }

-int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
   lf_disable_interrupts_nested();
   int res = *ptr;
   if ((*ptr) == oldval) {
diff --git a/low_level_platform/impl/src/lf_atomic_windows.c b/low_level_platform/impl/src/lf_atomic_windows.c
index 1db0fa2de..ff5a01750 100644
--- a/low_level_platform/impl/src/lf_atomic_windows.c
+++ b/low_level_platform/impl/src/lf_atomic_windows.c
@@ -10,18 +10,18 @@
 #include "platform/lf_atomic.h"
 #include <windows.h>

-int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return InterlockedExchangeAdd(ptr, value); }
+int lf_atomic_fetch_add(int* ptr, int value) { return InterlockedExchangeAdd((LONG*)ptr, (LONG)value); }
 int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return InterlockedExchangeAdd64(ptr, value); }
-int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return InterlockedAdd(ptr, value); }
+int lf_atomic_add_fetch(int* ptr, int value) { return InterlockedAdd((LONG*)ptr, (LONG)value); }
 int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return InterlockedAdd64(ptr, value); }

-bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
-  return (InterlockedCompareExchange(ptr, newval, oldval) == oldval);
+bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
+  return (InterlockedCompareExchange((LONG*)ptr, (LONG)newval, (LONG)oldval) == oldval);
 }
 bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
   return (InterlockedCompareExchange64(ptr, newval, oldval) == oldval);
 }
-int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
-  return InterlockedCompareExchange(ptr, newval, oldval);
+int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
+  return InterlockedCompareExchange((LONG*)ptr, (LONG)newval, (LONG)oldval);
 }
 int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
   return InterlockedCompareExchange64(ptr, newval, oldval);
diff --git a/low_level_platform/impl/src/lf_flexpret_support.c b/low_level_platform/impl/src/lf_flexpret_support.c
index cf37c1b8a..7fb6d2a48 100644
--- a/low_level_platform/impl/src/lf_flexpret_support.c
+++ b/low_level_platform/impl/src/lf_flexpret_support.c
@@ -178,10 +178,7 @@ int lf_available_cores() {
   return FP_THREADS - 1; // Return the number of Flexpret HW threads
 }

-lf_thread_t lf_thread_self() {
-  // Not implemented.
-  return NULL;
-}
+lf_thread_t lf_thread_self() { return read_hartid(); }

 int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
   /**
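Note on the lf_atomic_irq.c backend above: on single-core bare-metal targets, atomicity is emulated by masking interrupts around a plain read-modify-write; correctness relies on there being exactly one core. The pattern, sketched with hypothetical mask/unmask hooks (a real port wires these to the SoC's interrupt controller):

#include <stdbool.h>

extern void irq_disable(void); // Hypothetical: mask interrupts.
extern void irq_enable(void);  // Hypothetical: unmask interrupts.

// On a single core, masking interrupts makes this read-modify-write
// indivisible: no other context can run between the load and the store.
static int atomic_fetch_add_irq(int* ptr, int value) {
  irq_disable();
  int previous = *ptr;
  *ptr += value;
  irq_enable();
  return previous;
}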
diff --git a/low_level_platform/impl/src/lf_patmos_support.c b/low_level_platform/impl/src/lf_patmos_support.c
index 43f20710c..6529c95bc 100644
--- a/low_level_platform/impl/src/lf_patmos_support.c
+++ b/low_level_platform/impl/src/lf_patmos_support.c
@@ -70,6 +70,26 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
   }
 }

+int lf_sleep(interval_t sleep_duration) {
+  instant_t now;
+  _lf_clock_gettime(&now);
+  instant_t wakeup = now + sleep_duration;
+
+  // Do busy sleep
+  do {
+    _lf_clock_gettime(&now);
+  } while ((now < wakeup));
+  return 0;
+}
+
+/**
+ * Pause execution for a number of nanoseconds.
+ *
+ * @return 0 for success, or -1 for failure. In case of failure, errno will be
+ * set appropriately (see `man 2 clock_nanosleep`).
+ */
+int lf_nanosleep(interval_t requested_time) { return lf_sleep(requested_time); }
+
 /**
  * Patmos clock does not need initialization.
  */
diff --git a/low_level_platform/impl/src/lf_platform_util.c b/low_level_platform/impl/src/lf_platform_util.c
index 0225aa423..212e6ea83 100644
--- a/low_level_platform/impl/src/lf_platform_util.c
+++ b/low_level_platform/impl/src/lf_platform_util.c
@@ -21,6 +21,6 @@ static thread_local int lf_thread_id_var = -1;

 int lf_thread_id() { return lf_thread_id_var; }

-void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
+void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add(&_lf_worker_thread_count, 1); }
 #endif
 #endif
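Note on the lf_platform_util.c pattern above: a thread_local id seeded from a shared atomic counter is a standard way to hand each worker a dense, stable index without locks. Standalone sketch in C11:

#include <stdatomic.h>
#include <threads.h>

static atomic_int worker_thread_count = 0;
static thread_local int thread_id_var = -1; // -1 means "not yet assigned".

// Each thread calls this once at startup; fetch-add hands out 0, 1, 2, ...
// and no two threads can ever receive the same id.
static void initialize_thread_id(void) { thread_id_var = atomic_fetch_add(&worker_thread_count, 1); }

static int thread_id(void) { return thread_id_var; }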
diff --git a/low_level_platform/impl/src/lf_windows_support.c b/low_level_platform/impl/src/lf_windows_support.c
index 61424ac7f..c4524eda4 100644
--- a/low_level_platform/impl/src/lf_windows_support.c
+++ b/low_level_platform/impl/src/lf_windows_support.c
@@ -39,6 +39,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <windows.h>
 #include <process.h>
 #include <errno.h>
+#include <stdio.h> // For fprintf()

 #include "platform/lf_windows_support.h"
 #include "low_level_platform.h"
@@ -64,7 +65,7 @@ void _lf_initialize_clock() {
   if (_lf_use_performance_counter) {
     _lf_frequency_to_ns = (double)performance_frequency.QuadPart / BILLION;
   } else {
-    lf_print_error("High resolution performance counter is not supported on this machine.");
+    fprintf(stderr, "ERROR: High resolution performance counter is not supported on this machine.\n");
     _lf_frequency_to_ns = 0.01;
   }
 }
@@ -89,9 +90,9 @@ int _lf_clock_gettime(instant_t* t) {
   }
   LARGE_INTEGER windows_time;
   if (_lf_use_performance_counter) {
-    int result = QueryPerformanceCounter(&windows_time);
+    result = QueryPerformanceCounter(&windows_time);
     if (result == 0) {
-      lf_print_error("_lf_clock_gettime(): Failed to read the value of the physical clock.");
+      fprintf(stderr, "ERROR: _lf_clock_gettime(): Failed to read the value of the physical clock.\n");
       return result;
     }
   } else {
@@ -140,6 +141,7 @@ int lf_sleep(interval_t sleep_duration) {
 }

 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
+  (void)env; // Suppress unused variable warning.
   interval_t sleep_duration = wakeup_time - lf_time_physical();

   if (sleep_duration <= 0) {
@@ -165,7 +167,11 @@ int lf_available_cores() {
 lf_thread_t lf_thread_self() { return GetCurrentThread(); }

 int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
-  uintptr_t handle = _beginthreadex(NULL, 0, lf_thread, arguments, 0, NULL);
+  // _beginthreadex requires a function that returns unsigned rather than void*.
+  // So the following double cast suppresses the warning:
+  // '_beginthreadex_proc_type' differs in levels of indirection from 'void *(__cdecl *)(void *)'
+  uintptr_t handle =
+      _beginthreadex(NULL, 0, (unsigned(__stdcall*)(void*))(uintptr_t(__stdcall*)(void*))lf_thread, arguments, 0, NULL);
   *thread = (HANDLE)handle;
   if (handle == 0) {
     return errno;
@@ -183,6 +189,9 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum
  */
 int lf_thread_join(lf_thread_t thread, void** thread_return) {
   DWORD retvalue = WaitForSingleObject(thread, INFINITE);
+  if (thread_return != NULL) {
+    *thread_return = (void*)retvalue;
+  }
   if (retvalue == WAIT_FAILED) {
     return EINVAL;
   }
@@ -192,11 +201,23 @@ int lf_thread_join(lf_thread_t thread, void** thread_return) {
 /**
  * Real-time scheduling API not implemented for Windows.
  */
-int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) { return -1; }
+int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) {
+  (void)thread;     // Suppress unused variable warning.
+  (void)cpu_number; // Suppress unused variable warning.
+  return -1;
+}

-int lf_thread_set_priority(lf_thread_t thread, int priority) { return -1; }
+int lf_thread_set_priority(lf_thread_t thread, int priority) {
+  (void)thread;   // Suppress unused variable warning.
+  (void)priority; // Suppress unused variable warning.
+  return -1;
+}

-int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { return -1; }
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) {
+  (void)thread; // Suppress unused variable warning.
+  (void)policy; // Suppress unused variable warning.
+  return -1;
+}

 int lf_mutex_init(_lf_critical_section_t* critical_section) {
   // Set up a recursive mutex
@@ -278,10 +299,20 @@ int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
   }

   // convert ns to ms and round up to closest full integer
-  DWORD wait_duration_ms = (wait_duration + 999999LL) / 1000000LL;
+  interval_t wait_duration_ms = (wait_duration + 999999LL) / 1000000LL;
+  DWORD wait_duration_saturated;
+  if (wait_duration_ms > 0xFFFFFFFFLL) {
+    // Saturate at 0xFFFFFFFFLL
+    wait_duration_saturated = (DWORD)0xFFFFFFFFLL;
+  } else if (wait_duration_ms <= 0) {
+    // No need to wait. Return indicating that the wait is complete.
+    return LF_TIMEOUT;
+  } else {
+    wait_duration_saturated = (DWORD)wait_duration_ms;
+  }

   int return_value = (int)SleepConditionVariableCS((PCONDITION_VARIABLE)&cond->condition,
-                                                   (PCRITICAL_SECTION)cond->critical_section, wait_duration_ms);
+                                                   (PCRITICAL_SECTION)cond->critical_section, wait_duration_saturated);
   if (return_value == 0) {
     // Error
     if (GetLastError() == ERROR_TIMEOUT) {
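Note on the timed-wait fix above: the old code truncated a 64-bit millisecond count into the 32-bit DWORD taken by SleepConditionVariableCS, so very long waits could wrap into short ones; the new code saturates instead. A portable sketch of the saturation step (the helper name is hypothetical):

#include <stdint.h>

// Clamp a signed 64-bit millisecond duration into an unsigned 32-bit value,
// saturating rather than wrapping.
static uint32_t saturate_ms_to_u32(int64_t ms) {
  if (ms <= 0)
    return 0; // Caller should treat this as "do not wait at all".
  if (ms > (int64_t)UINT32_MAX)
    return UINT32_MAX; // About 49.7 days; the longest representable wait.
  return (uint32_t)ms;
}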
diff --git a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
index e23332f81..9dc343bc5 100644
--- a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
+++ b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
@@ -113,6 +113,7 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
     return 0;
   } else {
     lf_print_error_and_exit("k_sem_take returned %d", res);
+    return -1;
   }
 }
diff --git a/low_level_platform/impl/src/lf_zephyr_support.c b/low_level_platform/impl/src/lf_zephyr_support.c
index 5e5efb82d..74ae9bf90 100644
--- a/low_level_platform/impl/src/lf_zephyr_support.c
+++ b/low_level_platform/impl/src/lf_zephyr_support.c
@@ -36,14 +36,21 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "platform/lf_platform_util.h"
 #include "low_level_platform.h"
 #include "tag.h"
+#include "logging.h"

 #include <zephyr/kernel.h>
+#include <zephyr/fatal.h>

 // Keep track of nested critical sections
 static uint32_t num_nested_critical_sections = 0;
 // Keep track of IRQ mask when entering critical section so we can enable again after
 static volatile unsigned irq_mask = 0;

+// Catch kernel panics from Zephyr
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf* esf) {
+  lf_print_error_and_exit("Zephyr kernel panic reason=%d", reason);
+}
+
 int lf_sleep(interval_t sleep_duration) {
   k_sleep(K_NSEC(sleep_duration));
   return 0;
@@ -81,8 +88,12 @@ int lf_enable_interrupts_nested() {
 // If NUMBER_OF_WORKERS is not specified, or set to 0, then we default to 1.
 #if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0
 #undef NUMBER_OF_WORKERS
+#if defined(LF_REACTION_GRAPH_BREADTH)
+#define NUMBER_OF_WORKERS LF_REACTION_GRAPH_BREADTH
+#else
 #define NUMBER_OF_WORKERS 1
 #endif
+#endif

 // If USER_THREADS is not specified, then default to 0.
 #if !defined(USER_THREADS)
@@ -149,9 +160,9 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum
 int lf_thread_join(lf_thread_t thread, void** thread_return) { return k_thread_join(thread, K_FOREVER); }

 void initialize_lf_thread_id() {
-  static int _lf_worker_thread_count = 0;
+  static int32_t _lf_worker_thread_count = 0;
   int* thread_id = (int*)malloc(sizeof(int));
-  *thread_id = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1);
+  *thread_id = lf_atomic_fetch_add(&_lf_worker_thread_count, 1);
   k_thread_custom_data_set(thread_id);
 }
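Note on the NUMBER_OF_WORKERS change above: when the application does not pin a worker count, the Zephyr port now falls back to the code-generated LF_REACTION_GRAPH_BREADTH bound before defaulting to 1. The preprocessor pattern, isolated into a compilable snippet:

#include <stdio.h>

// Simulate the build system leaving NUMBER_OF_WORKERS unset (or zero).
#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0
#undef NUMBER_OF_WORKERS
#if defined(LF_REACTION_GRAPH_BREADTH)
// Prefer a code-generated bound on useful parallelism when available.
#define NUMBER_OF_WORKERS LF_REACTION_GRAPH_BREADTH
#else
#define NUMBER_OF_WORKERS 1
#endif
#endif

int main(void) {
  printf("workers: %d\n", NUMBER_OF_WORKERS); // Prints 1 unless overridden.
  return 0;
}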
diff --git a/trace/impl/src/trace_impl.c b/trace/impl/src/trace_impl.c
index 895247e87..02ffbb3bd 100644
--- a/trace/impl/src/trace_impl.c
+++ b/trace/impl/src/trace_impl.c
@@ -43,46 +43,46 @@ static version_t version = {.build_config =
  * See trace.h.
  * @return The number of items written to the object table or -1 for failure.
  */
-static int write_trace_header(trace_t* trace) {
-  if (trace->_lf_trace_file != NULL) {
-    size_t items_written = fwrite(&start_time, sizeof(int64_t), 1, trace->_lf_trace_file);
+static int write_trace_header(trace_t* t) {
+  if (t->_lf_trace_file != NULL) {
+    size_t items_written = fwrite(&start_time, sizeof(int64_t), 1, t->_lf_trace_file);
     if (items_written != 1)
-      _LF_TRACE_FAILURE(trace);
+      _LF_TRACE_FAILURE(t);

     // The next item in the header is the size of the
     // _lf_trace_object_descriptions table.
-    items_written = fwrite(&trace->_lf_trace_object_descriptions_size, sizeof(int), 1, trace->_lf_trace_file);
+    items_written = fwrite(&t->_lf_trace_object_descriptions_size, sizeof(int), 1, t->_lf_trace_file);
     if (items_written != 1)
-      _LF_TRACE_FAILURE(trace);
+      _LF_TRACE_FAILURE(t);

     // Next we write the table.
-    for (size_t i = 0; i < trace->_lf_trace_object_descriptions_size; i++) {
+    for (size_t i = 0; i < t->_lf_trace_object_descriptions_size; i++) {
       // Write the pointer to the self struct.
-      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].pointer, sizeof(void*), 1, trace->_lf_trace_file);
+      items_written = fwrite(&t->_lf_trace_object_descriptions[i].pointer, sizeof(void*), 1, t->_lf_trace_file);
       if (items_written != 1)
-        _LF_TRACE_FAILURE(trace);
+        _LF_TRACE_FAILURE(t);

       // Write the pointer to the trigger_t struct.
-      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].trigger, sizeof(void*), 1, trace->_lf_trace_file);
+      items_written = fwrite(&t->_lf_trace_object_descriptions[i].trigger, sizeof(void*), 1, t->_lf_trace_file);
       if (items_written != 1)
-        _LF_TRACE_FAILURE(trace);
+        _LF_TRACE_FAILURE(t);

       // Write the object type.
-      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].type, // Write the pointer value.
-                             sizeof(_lf_trace_object_t), 1, trace->_lf_trace_file);
+      items_written = fwrite(&t->_lf_trace_object_descriptions[i].type, // Write the pointer value.
+                             sizeof(_lf_trace_object_t), 1, t->_lf_trace_file);
       if (items_written != 1)
-        _LF_TRACE_FAILURE(trace);
+        _LF_TRACE_FAILURE(t);

       // Write the description.
-      size_t description_size = strlen(trace->_lf_trace_object_descriptions[i].description);
-      items_written = fwrite(trace->_lf_trace_object_descriptions[i].description, sizeof(char),
+      size_t description_size = strlen(t->_lf_trace_object_descriptions[i].description);
+      items_written = fwrite(t->_lf_trace_object_descriptions[i].description, sizeof(char),
                              description_size + 1, // Include null terminator.
-                             trace->_lf_trace_file);
+                             t->_lf_trace_file);
       if (items_written != description_size + 1)
-        _LF_TRACE_FAILURE(trace);
+        _LF_TRACE_FAILURE(t);
     }
   }
-  return trace->_lf_trace_object_descriptions_size;
+  return (int)t->_lf_trace_object_descriptions_size;
 }

 /**
@@ -125,37 +125,38 @@ static void flush_trace_locked(trace_t* trace, int worker) {

 /**
  * @brief Flush the specified buffer to a file.
+ * @param t The trace struct.
  * @param worker Index specifying the trace to flush.
  */
-static void flush_trace(trace_t* trace, int worker) {
+static void flush_trace(trace_t* t, int worker) {
   // To avoid having more than one worker writing to the file at the same time,
   // enter a critical section.
   lf_platform_mutex_lock(trace_mutex);
-  flush_trace_locked(trace, worker);
+  flush_trace_locked(t, worker);
   lf_platform_mutex_unlock(trace_mutex);
 }

-static void start_trace(trace_t* trace, int max_num_local_threads) {
+static void start_trace(trace_t* t, int max_num_local_threads) {
   // Do not write the trace header information to the file yet
-  // so that startup reactions can register user-defined trace objects.
+  // so that startup reactions can register user-defined t objects.
   // write_trace_header();
-  trace->_lf_trace_header_written = false;
+  t->_lf_trace_header_written = false;

   // Allocate an array of arrays of trace records, one per worker thread plus one
   // for the 0 thread (the main thread, or in an single-threaded program, the only
   // thread).
-  trace->_lf_number_of_trace_buffers = max_num_local_threads;
-  trace->_lf_trace_buffer =
-      (trace_record_nodeps_t**)malloc(sizeof(trace_record_nodeps_t*) * (trace->_lf_number_of_trace_buffers + 1));
-  trace->_lf_trace_buffer++; // the buffer at index -1 is a fallback for user threads.
-  for (int i = -1; i < (int)trace->_lf_number_of_trace_buffers; i++) {
-    trace->_lf_trace_buffer[i] = (trace_record_nodeps_t*)malloc(sizeof(trace_record_nodeps_t) * TRACE_BUFFER_CAPACITY);
+  t->_lf_number_of_trace_buffers = max_num_local_threads;
+  t->_lf_trace_buffer =
+      (trace_record_nodeps_t**)malloc(sizeof(trace_record_nodeps_t*) * (t->_lf_number_of_trace_buffers + 1));
+  t->_lf_trace_buffer++; // the buffer at index -1 is a fallback for user threads.
+  for (int i = -1; i < (int)t->_lf_number_of_trace_buffers; i++) {
+    t->_lf_trace_buffer[i] = (trace_record_nodeps_t*)malloc(sizeof(trace_record_nodeps_t) * TRACE_BUFFER_CAPACITY);
   }

   // Array of counters that track the size of each trace record (per thread).
-  trace->_lf_trace_buffer_size = (size_t*)calloc(sizeof(size_t), trace->_lf_number_of_trace_buffers + 1);
-  trace->_lf_trace_buffer_size++;
+  t->_lf_trace_buffer_size = (size_t*)calloc(sizeof(size_t), t->_lf_number_of_trace_buffers + 1);
+  t->_lf_trace_buffer_size++;

-  trace->_lf_trace_stop = 0;
+  t->_lf_trace_stop = 0;
   LF_PRINT_DEBUG("Started tracing.");
 }
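Note on write_trace_header above: every fwrite is checked against the requested item count because a short write silently corrupts the whole trace file. The checked-write pattern, isolated with a hypothetical failure macro standing in for _LF_TRACE_FAILURE:

#include <stdio.h>
#include <stdlib.h>

// Hypothetical stand-in for _LF_TRACE_FAILURE: close the stream and bail out.
#define WRITE_OR_FAIL(ptr, size, count, stream) \
  do { \
    if (fwrite((ptr), (size), (count), (stream)) != (count)) { \
      fclose(stream); \
      exit(EXIT_FAILURE); \
    } \
  } while (0)

int main(void) {
  FILE* f = fopen("header.bin", "wb");
  if (f == NULL)
    return 1;
  long long start_time = 123456789LL;
  int table_size = 0;
  // Same shape as the trace header: the start time, then the table size.
  WRITE_OR_FAIL(&start_time, sizeof(start_time), 1, f);
  WRITE_OR_FAIL(&table_size, sizeof(table_size), 1, f);
  fclose(f);
  return 0;
}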