/*
 * Real-Time Operating Systems
* 4 threads managed with Semaphores (Priority Ceiling Policy)
*
* by Simone Contorno
*/
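/*
 * Build/run sketch (assumed, not part of the original file):
 *   g++ rtos_priority_ceiling.cpp -o rtos_priority_ceiling -lpthread -lm
 *   sudo ./rtos_priority_ceiling   (SCHED_FIFO priorities and mutex ceilings require root)
 */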
// Required headers
#ifndef _GNU_SOURCE
#define _GNU_SOURCE // For CPU_ZERO/CPU_SET and pthread_setaffinity_np (must precede the includes)
#endif
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // clock_gettime, clock_nanosleep
#include <unistd.h>
#include <math.h>
#include <sched.h> // cpu_set_t
#include <sys/types.h>
// The code to be executed by each task
void task1_1_code();
void task1_2_code();
void task2_1_code();
void task2_2_code();
void task3_code();
void task4_code();
// The characteristic function of each thread
void *task1(void *);
void *task2(void *);
void *task3(void *);
void *task4(void *);
// Constants
#define NEXEC 30 // Change this value to increase or decrease the number of executions of each task
#define NPERIODICTASKS 4
#define NAPERIODICTASKS 0
#define NTASKS (NPERIODICTASKS + NAPERIODICTASKS) // Parenthesized so the expansion is safe inside expressions
// Global variables
long int periods[NTASKS];
struct timespec next_arrival_time[NTASKS];
double WCET[NTASKS];
pthread_attr_t attributes[NTASKS];
pthread_t thread_id[NTASKS];
struct sched_param parameters[NTASKS];
int missed_deadlines[NTASKS];
// Shared resources: TxTy is written by task x and read by task y
int T1T2 = 0; // Protected by mutex1
int T1T4 = 0; // Protected by mutex2
int T2T3 = 0; // Protected by mutex3
// Execution counter
int counter = 1;
// Semaphores
pthread_mutex_t mutex1;
pthread_mutex_t mutex2;
pthread_mutex_t mutex3;
pthread_mutexattr_t mutexAttr; // Mutex attributes (used to select the Priority Ceiling protocol)
int main() {
printf("\n"); fflush(stdout);
/*
 * Set the period of each periodic task in nanoseconds.
 * Order them from the shortest period (highest priority) to the longest period (lowest priority),
 * following rate-monotonic priority assignment.
 */
periods[0] = 80000000; // 0.08 sec
periods[1] = 100000000; // 0.1 sec
periods[2] = 160000000; // 0.16 sec
periods[3] = 200000000; // 0.2 sec
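// With these periods, rate-monotonic order gives task 1 the highest priority and task 4 the lowest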
// Read maximum and minimum priorities
struct sched_param priomax;
priomax.sched_priority = sched_get_priority_max(SCHED_FIFO);
struct sched_param priomin;
priomin.sched_priority = sched_get_priority_min(SCHED_FIFO);
// Set the maximum priority for the current thread (requires superuser privileges)
if (getuid() == 0) pthread_setschedparam(pthread_self(), SCHED_FIFO, &priomax);
/*
 * Execute all tasks in standalone mode to measure their execution times (using clock_gettime).
 * Use the measured values as the worst-case execution time (WCET) of each task.
 */
for (int i = 0; i < NTASKS; i++) {
WCET[i] = 0;
for (int j = 0; j < NEXEC; j++) {
// Declare time variables
struct timespec time_1, time_2;
// Execute the task to estimate its WCET
clock_gettime(CLOCK_REALTIME, &time_1);
if (i == 0) {
task1_1_code();
task1_2_code();
}
if (i == 1) {
task2_1_code();
task2_2_code();
}
if (i == 2) task3_code();
if (i == 3) task4_code();
clock_gettime(CLOCK_REALTIME, &time_2);
// Compute the Worst Case Execution Time
double WCET_temp = 1000000000 * (time_2.tv_sec - time_1.tv_sec) + (time_2.tv_nsec-time_1.tv_nsec);
if (WCET_temp > WCET[i])
WCET[i] = WCET_temp;
}
}
counter = 1; // Reset counter
double BWCET[NTASKS] = {0}; // Measured critical-section lengths (zero-initialized so the max below starts from a defined value)
double B[NTASKS]; // Blocking-time bound of each thread
/*
 * Measure the critical sections that can block each thread.
 * Under the Priority Ceiling Protocol a task is blocked for at most one
 * critical section of one lower-priority task per activation, so each B[i]
 * will be the longest single critical section that can delay thread i.
 */
for (int i = 0; i < NTASKS; i++) {
for (int j = 0; j < NEXEC; j++) {
// Declare time variables
struct timespec time_1, time_2;
// Execute the critical-section code to estimate its duration
clock_gettime(CLOCK_REALTIME, &time_1);
if (i == 0) task2_1_code(); // Thread 2's critical section on mutex1 (can block thread 1)
if (i == 1 || i == 2) task4_code(); // Thread 4's critical section on mutex2 (can block threads 1 and 2)
if (i == 3) task3_code(); // Thread 3's critical section on mutex3 (can block thread 2)
clock_gettime(CLOCK_REALTIME, &time_2);
// Keep the longest observed critical-section time
double BWCET_temp = 1000000000 * (time_2.tv_sec - time_1.tv_sec) + (time_2.tv_nsec-time_1.tv_nsec);
if (BWCET_temp > BWCET[i])
BWCET[i] = BWCET_temp;
}
}
// Take the longest single critical section that can block each thread
if (BWCET[0] > BWCET[1])
B[0] = BWCET[0]; // Thread 1: blocked by thread 2 (mutex1) or thread 4 (mutex2)
else
B[0] = BWCET[1];
if (BWCET[2] > BWCET[3])
B[1] = BWCET[2]; // Thread 2: blocked by thread 4 (mutex2) or thread 3 (mutex3)
else
B[1] = BWCET[3];
B[2] = WCET[3]; // Thread 3: blocked while thread 4 holds mutex2, whose ceiling exceeds thread 3's priority
B[3] = 0; // Thread 4: lowest priority, never blocked
// Clear the console
system("clear");
// Print all Worst Case Execution Time
for (int i = 0; i < NTASKS; i++) {
printf("Worst Case Execution Time [%i] = %f [ns]\n", i+1, WCET[i]);
fflush(stdout);
}
printf("\n");
fflush(stdout);
// Print all Blocking time
for (int i = 0; i < NTASKS; i++) {
printf("Blocking time thread [%i] = %f\n", i+1, B[i]);
fflush(stdout);
}
printf("\n");
fflush(stdout);
// Compute U, Ulub and check for schedulability
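/*
 * Schedulability test (rate monotonic, extended with blocking), checked for each i = 1..n:
 *
 *   U_i = sum_{j=1..i} WCET_j / T_j + B_i / T_i  <=  Ulub_i = i * (2^(1/i) - 1)
 *
 * This is the Liu-Layland utilization bound with the Priority Ceiling blocking
 * term B_i added. The test is sufficient but not necessary: a task set that
 * fails it may still be schedulable.
 */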
double U, Ulub;
int flag = 0;
for (int i = 0; i < NTASKS; i++) {
// Reset U and Ulub
U = 0;
Ulub = 0;
// Compute U for thread i
for (int j = 0; j <= i; j++) {
U += WCET[j] / periods[j];
}
U += B[i] / periods[i];
// Compute Ulub for thread i
Ulub = (i+1) * (pow(2.0,(1.0 / (i+1))) - 1);
// Print the result
printf("[%i] U = %lf ; Ulub = %lf\n", i+1, U, Ulub);
fflush(stdout);
// Check for schedulability
if (U > Ulub) {
flag = 1;
break;
}
}
if (flag == 0) {
printf("--> Task set schedulable.\n");
fflush(stdout);
}
else {
printf("--> Task set not schedulable.\n");
fflush(stdout);
}
// Set the minimum priority to the current thread
if (getuid() == 0)
pthread_setschedparam(pthread_self(), SCHED_FIFO, &priomin);
// Set the attributes of each task, including scheduling policy and priority
for (int i = 0; i < NPERIODICTASKS; i++) {
// Initialize the attribute structure of task i
pthread_attr_init(&(attributes[i]));
// Set the attributes to tell the kernel that the priorities and policies are explicitly chosen
pthread_attr_setinheritsched(&(attributes[i]), PTHREAD_EXPLICIT_SCHED);
// Set the attributes to set the SCHED_FIFO policy
pthread_attr_setschedpolicy(&(attributes[i]), SCHED_FIFO);
// Assign priorities in rate-monotonic order: the shorter the period, the higher the priority
parameters[i].sched_priority = priomin.sched_priority + NTASKS - i;
// Set the attributes and the parameters of the current thread
pthread_attr_setschedparam(&(attributes[i]), &(parameters[i]));
}
// Set attributes to the semaphores (Priority Ceiling)
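/*
 * Each mutex's ceiling must be at least the highest priority among the threads
 * that ever lock it:
 *   mutex1 is locked by threads 1 and 2 -> ceiling = thread 1's priority (priomin + NTASKS - 0)
 *   mutex2 is locked by threads 1 and 4 -> ceiling = thread 1's priority (priomin + NTASKS - 0)
 *   mutex3 is locked by threads 2 and 3 -> ceiling = thread 2's priority (priomin + NTASKS - 1)
 */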
pthread_mutexattr_init(&mutexAttr);
pthread_mutexattr_setprotocol(&mutexAttr, PTHREAD_PRIO_PROTECT);
pthread_mutexattr_setprioceiling(&mutexAttr, priomin.sched_priority + NTASKS - 0);
pthread_mutex_init(&mutex1, &mutexAttr);
pthread_mutexattr_setprioceiling(&mutexAttr, priomin.sched_priority + NTASKS - 0);
pthread_mutex_init(&mutex2, &mutexAttr);
pthread_mutexattr_setprioceiling(&mutexAttr, priomin.sched_priority + NTASKS - 1);
pthread_mutex_init(&mutex3, &mutexAttr);
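// With PTHREAD_PRIO_PROTECT, a thread is raised to the mutex's ceiling priority as soon as it locks it, and keeps it until unlock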
// Declare time variables
struct timespec time_1;
clock_gettime(CLOCK_REALTIME, &time_1);
// Set the next arrival time for each task
for (int i = 0; i < NPERIODICTASKS; i++) {
/*
 * Treat the current time as the start of the first period:
 * add the period to the current time to obtain the start of the second period.
 */
long int next_arrival_nanoseconds = time_1.tv_nsec + periods[i];
next_arrival_time[i].tv_nsec = next_arrival_nanoseconds % 1000000000;
next_arrival_time[i].tv_sec = time_1.tv_sec + next_arrival_nanoseconds / 1000000000;
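// The division and modulo normalize the sum so that tv_nsec stays in [0, 1e9), as a valid timespec requires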
missed_deadlines[i] = 0;
}
// Declare the variable to contain the return values of pthread_create
int iret[NTASKS];
// Create all threads
iret[0] = pthread_create(&(thread_id[0]), &(attributes[0]), task1, NULL);
iret[1] = pthread_create(&(thread_id[1]), &(attributes[1]), task2, NULL);
iret[2] = pthread_create(&(thread_id[2]), &(attributes[2]), task3, NULL);
iret[3] = pthread_create(&(thread_id[3]), &(attributes[3]), task4, NULL);
// Join all threads
for (int i = 0; i < NTASKS; i++)
pthread_join(thread_id[i], NULL);
// Print deadlines values
for (int i = 0; i < NTASKS; i++) {
printf ("\nMissed Deadlines Task %d = %d", i+1, missed_deadlines[i]);
fflush(stdout);
}
printf ("\n\n"); fflush(stdout);
return 0;
}
// Task 1_1 - Write into T1T2
void task1_1_code() {
printf("\nExecution %i/%i:\n[1] --> ", counter, NEXEC); fflush(stdout);
T1T2 = rand() % 10;
printf("%i (W)\n", T1T2); fflush(stdout);
counter++;
}
// Task 1_2 - Write into T1T4
void task1_2_code() {
T1T4 = rand() % 10;
printf("[1] --> %i (W)\n", T1T4); fflush(stdout);
}
// Thread 1 - Use mutex1 and mutex2
void *task1(void *ptr) {
// Set thread affinity
cpu_set_t cset;
CPU_ZERO (&cset);
CPU_SET(0, &cset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset);
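// All four threads pin themselves to CPU 0: on a single core, execution order is decided purely by priority, which is the setting the Priority Ceiling analysis assumes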
for (int i = 0; i < NEXEC; i++) {
// Go into the critical sections protected by mutex 1 and execute task 1_1
pthread_mutex_lock(&mutex1);
task1_1_code();
pthread_mutex_unlock(&mutex1);
// Go into the critical sections protected by mutex 2 and execute task 1_2
pthread_mutex_lock(&mutex2);
task1_2_code();
pthread_mutex_unlock(&mutex2);
// Declare time variables and get time
struct timespec time;
clock_gettime(CLOCK_REALTIME, &time);
// After execution, compute the time left before the next period starts (negative means a missed deadline)
long int timetowait = 1000000000 * (next_arrival_time[0].tv_sec - time.tv_sec) + (next_arrival_time[0].tv_nsec - time.tv_nsec);
if (timetowait < 0) missed_deadlines[0]++;
// Sleep until the next arrival time and compute the next one
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next_arrival_time[0], NULL);
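// TIMER_ABSTIME makes clock_nanosleep wait until an absolute instant, so timing errors do not accumulate across periods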
long int next_arrival_nanoseconds = next_arrival_time[0].tv_nsec + periods[0];
next_arrival_time[0].tv_nsec = next_arrival_nanoseconds % 1000000000;
next_arrival_time[0].tv_sec = next_arrival_time[0].tv_sec + next_arrival_nanoseconds / 1000000000;
}
return 0;
}
// Task 2_1 - Read from T1T2
void task2_1_code() {
printf("[2] --> %i (R)\n", T1T2); fflush(stdout);
}
// Task 2_2 - Write into T2T3
void task2_2_code() {
T2T3 = rand() % 10;
printf("[2] --> %i (W)\n", T2T3); fflush(stdout);
}
// Thread 2 - Use mutex3 and mutex1
void *task2(void *ptr) {
// Set thread affinity
cpu_set_t cset;
CPU_ZERO (&cset);
CPU_SET(0, &cset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset);
for (int i = 0; i < NEXEC; i++) {
// Go into the critical sections protected by mutex 3 and execute task 2_2
pthread_mutex_lock(&mutex3);
task2_2_code();
pthread_mutex_unlock(&mutex3);
// Go into the critical sections protected by mutex 1 and execute task 2_1
pthread_mutex_lock(&mutex1);
task2_1_code();
pthread_mutex_unlock(&mutex1);
// Declare time variables and get time
struct timespec time;
clock_gettime(CLOCK_REALTIME, &time);
// After execution, compute the time left before the next period starts (negative means a missed deadline)
long int timetowait = 1000000000 * (next_arrival_time[1].tv_sec - time.tv_sec) + (next_arrival_time[1].tv_nsec - time.tv_nsec);
if (timetowait < 0) missed_deadlines[1]++;
// Sleep until the next arrival time and compute the next one
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next_arrival_time[1], NULL);
long int next_arrival_nanoseconds = next_arrival_time[1].tv_nsec + periods[1];
next_arrival_time[1].tv_nsec = next_arrival_nanoseconds % 1000000000;
next_arrival_time[1].tv_sec = next_arrival_time[1].tv_sec + next_arrival_nanoseconds / 1000000000;
}
return 0;
}
// Task 3 - Read from T2T3
void task3_code() {
printf("[3] --> %d (R)\n", T2T3); fflush(stdout);
}
// Thread 3 - Use mutex3
void *task3(void *ptr) {
// Set thread affinity
cpu_set_t cset;
CPU_ZERO (&cset);
CPU_SET(0, &cset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset);
for (int i = 0; i < NEXEC; i++) {
// Go into the critical section protected by mutex 3 and execute task 3
pthread_mutex_lock(&mutex3);
task3_code();
pthread_mutex_unlock(&mutex3);
// Declare time variables and get time
struct timespec time;
clock_gettime(CLOCK_REALTIME, &time);
// After execution, compute the time left before the next period starts (negative means a missed deadline)
long int timetowait = 1000000000 * (next_arrival_time[2].tv_sec - time.tv_sec) + (next_arrival_time[2].tv_nsec - time.tv_nsec);
if (timetowait < 0) missed_deadlines[2]++;
// Sleep until the next arrival time and compute the next one
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next_arrival_time[2], NULL);
long int next_arrival_nanoseconds = next_arrival_time[2].tv_nsec + periods[2];
next_arrival_time[2].tv_nsec = next_arrival_nanoseconds % 1000000000;
next_arrival_time[2].tv_sec = next_arrival_time[2].tv_sec + next_arrival_nanoseconds / 1000000000;
}
return 0;
}
// Task 4 - Read from T1T4
void task4_code() {
printf("[4] --> %i (R)\n", T1T4); fflush(stdout);
}
// Thread 4 - Use mutex2
void *task4(void *ptr) {
// Set thread affinity
cpu_set_t cset;
CPU_ZERO (&cset);
CPU_SET(0, &cset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset);
for (int i = 0; i < NEXEC; i++) {
// Go into the critical section protected by mutex 2 and execute task 4
pthread_mutex_lock(&mutex2);
task4_code();
pthread_mutex_unlock(&mutex2);
// Declare time variables and get time
struct timespec time;
clock_gettime(CLOCK_REALTIME, &time);
// After execution, compute the time left before the next period starts (negative means a missed deadline)
long int timetowait = 1000000000 * (next_arrival_time[3].tv_sec - time.tv_sec) + (next_arrival_time[3].tv_nsec - time.tv_nsec);
if (timetowait < 0) missed_deadlines[3]++;
// Sleep until the next arrival time and compute the next one
clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &next_arrival_time[3], NULL);
long int next_arrival_nanoseconds = next_arrival_time[3].tv_nsec + periods[3];
next_arrival_time[3].tv_nsec = next_arrival_nanoseconds % 1000000000;
next_arrival_time[3].tv_sec = next_arrival_time[3].tv_sec + next_arrival_nanoseconds / 1000000000;
}
return 0;
}