2023-10-27 10:42:23 -03:00
|
|
|
#include <stdio.h>
|
2023-10-27 11:24:40 -03:00
|
|
|
#include <pthread.h>
|
2023-10-27 12:23:06 -03:00
|
|
|
#include <semaphore.h>
|
|
|
|
#include <stdlib.h>
|
2023-10-27 10:42:23 -03:00
|
|
|
#include "lib/queue.h"
|
|
|
|
|
|
|
|
#define MAX_USERNAME_LENGTH 100
|
2023-11-02 21:08:48 -03:00
|
|
|
// ---- Shared state for the scheduler simulation ----

int *QUANTUM;  // per-CPU round-robin quantum values; allocated and filled in input_queue(), freed in main()

int CPUS;  // number of simulated CPUs, read from stdin in input_queue()

sem_t print_sem;  // posted once per CPU per tick; the print thread waits CPUS times before printing a row

sem_t *sim_sems;  // one semaphore per CPU; the print thread posts each of them to release the next tick

pthread_mutex_t finish_mutex;  // guards finish_count

pthread_mutex_t time_mutex;  // guards TIME

pthread_mutex_t summary_mutex;  // guards all accesses to the summary queue

int finish_count = 0;  // how many simulation threads have finished all of their jobs

int TIME = 1;  // global simulated clock; simulation starts at tick 1
|
2023-10-27 12:23:06 -03:00
|
|
|
// Argument bundle handed to both the print thread and each simulation thread.
typedef struct ThreadArgs {
    int cpu_id;            // index of the CPU this thread simulates (unused by the print thread)
    char *print_buffer;    // shared per-CPU character buffer: simulators write one char each, printer reads all
    Queue *summary_queue;  // shared summary queue (guarded by summary_mutex)
    Queue *in_queue;       // input queue of all processes read from stdin
} ThreadArgs;
|
|
|
|
|
2023-10-30 12:21:07 -03:00
|
|
|
/*
 * Allocate and populate the argument bundle for one thread.
 *
 * cpu_id        - index of the simulated CPU (ignored by the print thread)
 * print_buffer  - shared per-CPU output buffer
 * summary_queue - shared summary queue
 * in_queue      - shared input queue of processes
 *
 * Ownership of the returned struct transfers to the caller (freed in main).
 * Exits the program on allocation failure: no caller checks for NULL, so
 * returning NULL here would only move the crash to the first dereference.
 */
ThreadArgs *createArgs(int cpu_id, char *print_buffer, Queue *summary_queue, Queue *in_queue) {
    ThreadArgs *args = malloc(sizeof *args); // sizeof *args stays correct if the type changes
    if (args == NULL) {
        fprintf(stderr, "createArgs: out of memory\n");
        exit(EXIT_FAILURE);
    }
    args->cpu_id = cpu_id;
    args->print_buffer = print_buffer;
    args->summary_queue = summary_queue;
    args->in_queue = in_queue;
    return args;
}
|
2023-10-27 10:42:23 -03:00
|
|
|
|
|
|
|
Queue *input_queue() {
|
2023-10-30 12:21:07 -03:00
|
|
|
Queue *queue = createQueue();
|
|
|
|
char username[MAX_USERNAME_LENGTH]; // username buffer
|
|
|
|
char job;
|
|
|
|
int arrival_time, duration, affinity;
|
2023-10-27 10:42:23 -03:00
|
|
|
|
2023-10-27 11:24:40 -03:00
|
|
|
scanf("%d", &CPUS);
|
|
|
|
while (getchar() != '\n'); // clear the newline from the buffer
|
|
|
|
|
2023-11-02 21:08:48 -03:00
|
|
|
// Allocate dynamic quantum array
|
|
|
|
QUANTUM = malloc(sizeof(int) * CPUS);
|
|
|
|
int i = 0;
|
|
|
|
while (i < CPUS) {
|
|
|
|
scanf("%d", &QUANTUM[i]);
|
|
|
|
i++;
|
|
|
|
}
|
2023-10-30 12:21:07 -03:00
|
|
|
while (getchar() != '\n'); // clear the newline from the buffer
|
2023-10-27 12:23:06 -03:00
|
|
|
|
2023-10-30 12:21:07 -03:00
|
|
|
while (getchar() != '\n'); // ignore the rest of the line, this is the table line
|
|
|
|
// Loop through the process table and enqueue each process
|
|
|
|
while (scanf("%99s %c %d %d %d", username, &job, &arrival_time, &duration, &affinity) != EOF) {
|
|
|
|
Process *process = createProcess(username, job, arrival_time, duration, affinity);
|
|
|
|
enqueue(queue, process);
|
|
|
|
}
|
|
|
|
return queue;
|
2023-10-27 10:42:23 -03:00
|
|
|
}
|
|
|
|
|
2023-11-02 23:36:36 -03:00
|
|
|
// """atomic""" functions
|
2023-11-02 18:20:54 -03:00
|
|
|
int getTime() {
|
2023-11-02 21:08:48 -03:00
|
|
|
pthread_mutex_lock(&time_mutex);
|
|
|
|
int time = TIME;
|
|
|
|
pthread_mutex_unlock(&time_mutex);
|
|
|
|
return time;
|
2023-11-02 18:20:54 -03:00
|
|
|
}
|
2023-11-02 21:08:48 -03:00
|
|
|
|
2023-11-02 18:20:54 -03:00
|
|
|
void incrementTime() {
|
2023-11-02 21:08:48 -03:00
|
|
|
pthread_mutex_lock(&time_mutex);
|
|
|
|
TIME++;
|
|
|
|
pthread_mutex_unlock(&time_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
int getFinishCount() {
|
|
|
|
pthread_mutex_lock(&finish_mutex);
|
|
|
|
int count = finish_count;
|
|
|
|
pthread_mutex_unlock(&finish_mutex);
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
void incrementFinishCount() {
|
|
|
|
pthread_mutex_lock(&finish_mutex);
|
|
|
|
finish_count++;
|
|
|
|
pthread_mutex_unlock(&finish_mutex);
|
2023-11-02 18:20:54 -03:00
|
|
|
}
|
|
|
|
|
2023-10-30 12:21:07 -03:00
|
|
|
/*
 * Print-thread entry point.  Acts as the barrier coordinator for each tick:
 * it waits for all CPUS simulation threads to post print_sem (one post per
 * thread per tick), prints the row for the current time, advances the clock,
 * then posts every sim_sems[i] to release the simulators for the next tick.
 * When all simulation threads have finished it prints the summary and exits.
 * args is a ThreadArgs*; cpu_id is unused here.  Returns NULL.
 */
void *print(void *args) {
    // Cast args and create local variables
    ThreadArgs *thread_args = (ThreadArgs *) args;
    char *print_buffer = thread_args->print_buffer;

    // Print the Time label as well as the CPU labels
    printf("Time");
    for (int i = 0; i < CPUS; i++) {
        printf("\tCPU%d", i);
    }
    printf("\n");

    while (getFinishCount() < CPUS) {
        // Wait until every simulation thread has completed the current tick
        // (each one posts print_sem exactly once per tick)
        for (int i = 0; i < CPUS; ++i) {
            sem_wait(&print_sem);
        }

        int time = getTime();

        // Print the time and the print buffer (one character per CPU)
        printf("%d", time);
        for (int i = 0; i < CPUS; ++i) {
            printf("\t%c", print_buffer[i]);
        }
        printf("\n");

        // Essentially increase the time right before simulating,
        // so the simulators see the new tick when they wake up
        incrementTime();

        // Increment the simulation semaphores to let the simulation threads run
        for (int i = 0; i < CPUS; ++i) {
            sem_post(&sim_sems[i]);
        }
    }

    // Print the summary (lock because simulators write finish times into it)
    printf("\nSummary\n");
    pthread_mutex_lock(&summary_mutex);
    printList(thread_args->summary_queue);
    pthread_mutex_unlock(&summary_mutex);

    // let the simulation threads finish: each sim thread may still be blocked
    // on its sem_wait(&sim_sems[cpu_id]) after the last tick, so post once more
    // for some reason, if this is not here, the simulation threads will not finish
    for (int i = 0; i < CPUS; ++i) {
        sem_post(&sim_sems[i]);
    }

    return NULL;
}
|
|
|
|
|
|
|
|
|
2023-10-30 12:21:07 -03:00
|
|
|
/*
 * Simulation-thread entry point: round-robin scheduler for one CPU.
 * Each loop iteration is one tick: wait on this CPU's semaphore (released by
 * the print thread), admit newly-arrived jobs with matching affinity, run the
 * head of the local queue for one time unit, then post print_sem so the print
 * thread can emit the row.  The thread keeps participating in the barrier
 * (doneSimulating) even after its own work is done, until every CPU finishes.
 * args is a ThreadArgs*.  Returns NULL.
 */
void *simulation(void *args) {
    // Cast args and create local variables
    ThreadArgs *thread_args = (ThreadArgs *) args;
    Queue *in_queue = thread_args->in_queue;
    Queue *summary_queue = thread_args->summary_queue;
    char *print_buffer = thread_args->print_buffer;
    int cpu_id = thread_args->cpu_id;

    // Loop variables
    int quantum = QUANTUM[cpu_id];        // remaining ticks before the running job is rotated
    int addedJobs = 0;                     // jobs admitted to sim_queue so far
    int numberOfJobsForThisCPU = 0;        // total jobs with affinity == cpu_id (computed below)
    int time;
    bool doneSimulating = false;           // true once all our jobs have been admitted and completed
    Process *process = NULL;

    // Count number of jobs this CPU has to do, walking in_queue from its end
    // backwards via prev_elem (read-only traversal; in_queue is never mutated here)
    process = in_queue->end;
    for (int i = 0; i < in_queue->size; ++i) {
        if (process->affinity == cpu_id) {
            numberOfJobsForThisCPU++;
        }
        process = process->prev_elem;
    }

    // Create a queue for the simulation (this CPU's private ready queue)
    Queue *sim_queue = createQueue();
    while (getFinishCount() < CPUS) {
        // Only simulate if the time has changed: the print thread posts this
        // semaphore exactly once per tick after printing
        sem_wait(&sim_sems[cpu_id]);
        if (!doneSimulating) {
            time = getTime();
            // Begin going through all jobs and enqueueing them if they have arrived
            process = in_queue->end;
            for (int i = 0; i < in_queue->size; i++) {
                if (process->affinity == cpu_id && process->arrival_time == time) {
                    // Create copy to keep the queues separate
                    Process *copy = createProcess(process->username, process->job, process->arrival_time, process->duration, process->affinity);
                    enqueue(sim_queue, copy);
                    addedJobs++;
                }
                process = process->prev_elem;
            }

            // Begin simulating the current job (head of the ready queue)
            process = sim_queue->end;
            if (sim_queue->size != 0) {
                print_buffer[cpu_id] = process->job;
                process->duration--;
                quantum--;
                if (process->duration == 0) { // If the process is done, delete it
                    Process *temp = dequeue(sim_queue); // Store the process in a temp variable for deletion
                    pthread_mutex_lock(&summary_mutex);
                    search(summary_queue, temp->username)->finish_time = time; // Set the finish time for the summary queue
                    pthread_mutex_unlock(&summary_mutex);
                    destroyProcess(temp); // This should be called on every process
                    quantum = QUANTUM[cpu_id]; // Make sure to reset the quantum when a process is done
                } else if (quantum == 0) { // If the quantum is 0, then we need to dequeue the process and enqueue it again
                    process = dequeue(sim_queue); // rotate: move the job to the back of the queue
                    enqueue(sim_queue, process);
                    quantum = QUANTUM[cpu_id];
                }
            } else { //If there is nothing in sim_queue, put '-' in the print buffer
                print_buffer[cpu_id] = '-';
                if (addedJobs >= numberOfJobsForThisCPU) {
                    // Need to do this while finish is locked, otherwise the print thread will exit early
                    incrementFinishCount();
                    // If all jobs have been added, and the simulation queue is empty, then we are done
                    doneSimulating = true;
                }
            }
        }
        // Allow the print thread to print because the simulation for this tick is done
        sem_post(&print_sem);
    }
    // Let the print thread run one last time so it is not stuck waiting on us
    sem_post(&print_sem);
    // Free memory for the simulation queue. There should be nothing left in it
    stop(sim_queue);

    //printf("sim thread done: %d\n", cpu_id);
    return NULL;
}
|
|
|
|
|
2023-10-27 12:23:06 -03:00
|
|
|
|
2023-10-27 10:42:23 -03:00
|
|
|
/*
 * Entry point: reads the configuration and process table from stdin, spins up
 * one print thread plus one simulation thread per CPU, joins them, and tears
 * down all synchronization primitives and heap allocations.
 * Returns EXIT_SUCCESS.
 */
int main() {
    setvbuf(stdout, NULL, _IONBF, 0); // unbuffered stdout so rows appear as they are produced
    Queue *in_queue = input_queue(); // Create the input queue (also sets CPUS and QUANTUM)

    // Make sure sem is init right after getting cpus, which is done in input_queue
    sem_init(&print_sem, 0, 0); // Initialize the semaphore; starts at 0 so the printer waits for tick 1

    // Array of semaphores, one for each CPU
    sim_sems = malloc(sizeof(sem_t) * CPUS);
    for (int i = 0; i < CPUS; ++i) {
        sem_init(&sim_sems[i], 0, 1); // starts at 1 so each simulator runs the first tick immediately
    }

    pthread_mutex_init(&finish_mutex, NULL); // Initialize the mutex guarding finish_count
    pthread_mutex_init(&time_mutex, NULL); // Initialize the mutex guarding TIME
    pthread_mutex_init(&summary_mutex, NULL); // Initialize the mutex guarding the summary queue

    Queue *summary_queue = createQueue(); // Create the summary queue
    char *print_buffer = malloc(sizeof(char) * CPUS); // Create the print buffer (one char per CPU)

    // Summary creation: one entry per distinct username, in input order
    Process *process = in_queue->end;
    for (int i = 0; i < in_queue->size; ++i) {
        if (contains(summary_queue, process->username) == false) {
            Process *copy = createProcess(process->username, process->job, process->arrival_time, process->duration, process->affinity);
            enqueue(summary_queue, copy);
        }
        process = process->prev_elem;
    }

    // Create the print thread
    pthread_t print_thread;
    ThreadArgs *print_args = createArgs(0, print_buffer, summary_queue, in_queue);
    pthread_create(&print_thread, NULL, &print, print_args);

    // Create the simulation threads
    pthread_t threads[CPUS];
    ThreadArgs *args[CPUS]; // Array of arguments for each thread, so we can free them later
    for (int i = 0; i < CPUS; i++) {
        args[i] = createArgs(i, print_buffer, summary_queue, in_queue);
        pthread_create(&threads[i], NULL, &simulation, args[i]);
    }

    // Wait for print thread to finish (it exits once every simulator is done)
    pthread_join(print_thread, NULL);
    free(print_args);

    // Threads simulate, then print; join and free each simulator's args
    for (int i = 0; i < CPUS; i++) {
        pthread_join(threads[i], NULL);
        free(args[i]);
    }

    // Stop semaphores
    for (int i = 0; i < CPUS; ++i) {
        sem_destroy(&sim_sems[i]);
    }
    free(sim_sems);
    sem_destroy(&print_sem);

    // Stop mutexes
    pthread_mutex_destroy(&finish_mutex);
    pthread_mutex_destroy(&summary_mutex);
    pthread_mutex_destroy(&time_mutex);

    // Free memory
    stop(in_queue); // Free memory for input queue
    stop(summary_queue); // Free memory for summary queue
    free(print_buffer); // Free memory for print buffer
    free(QUANTUM); // Free memory for quantum array

    return EXIT_SUCCESS;
}
|