Add VeriFast kernel queue proofs (#117)

pull/121/head
Nathan Chong 5 years ago committed by GitHub
parent d5fedeaa96
commit 529c481c39

@@ -6,4 +6,5 @@ For more information on FreeRTOS testing please refer to https://www.freertos.or
## Directory structure
This directory is a work in progress -- we are migrating scattered test cases into it. Only what is currently under this directory is listed here.
- ```./CBMC```: This directory contains automated proofs of the memory safety of various parts of the FreeRTOS code base.
- ```./VeriFast```: This directory contains automated proofs of the functional correctness of various parts of the FreeRTOS code base.

@@ -0,0 +1,52 @@
VERIFAST ?= verifast
VERIFAST_ARGS = -I include -c $(EXTRA_VERIFAST_ARGS)
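# check_coverage: pass the VeriFast output through unchanged and fail unless it
# reports the expected number of statements verified (skipped when NO_COVERAGE=1).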
ifeq ($(NO_COVERAGE), 1)
check_coverage = cat
else
check_coverage = perl -pe \
'END { \
if ($$status) { \
print "Coverage regression failed: Expected $1 statements verified.\n"; \
} \
exit $$status; \
} \
$$status=/$1 statements verified/ ? 0 : 1;'
endif
all: queue
.PHONY: queue
queue:
@$(VERIFAST) $(VERIFAST_ARGS) queue/create.c | $(call check_coverage,269)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvCopyDataFromQueue.c | $(call check_coverage,253)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/prvCopyDataToQueue.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueEmpty.c | $(call check_coverage,234)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvIsQueueFull.c | $(call check_coverage,234)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvLockQueue.c | $(call check_coverage,235)
@$(VERIFAST) $(VERIFAST_ARGS) queue/prvUnlockQueue.c | $(call check_coverage,249)
@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueMessagesWaiting.c | $(call check_coverage,237)
@$(VERIFAST) $(VERIFAST_ARGS) queue/uxQueueSpacesAvailable.c | $(call check_coverage,235)
@$(VERIFAST) $(VERIFAST_ARGS) queue/vQueueDelete.c | $(call check_coverage,232)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueGenericSend.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueGenericSendFromISR.c | $(call check_coverage,262)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueEmptyFromISR.c | $(call check_coverage,232)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueIsQueueFullFromISR.c | $(call check_coverage,232)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeek.c | $(call check_coverage,280)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueuePeekFromISR.c | $(call check_coverage,245)
@$(VERIFAST) $(VERIFAST_ARGS) queue/xQueueReceive.c | $(call check_coverage,282)
@$(VERIFAST) $(VERIFAST_ARGS) -disable_overflow_check queue/xQueueReceiveFromISR.c | $(call check_coverage,259)
.PHONY: proof_changes
proof_changes:
@git grep "if[n]*def VERIFAST" -- '*.c' | cut -f 3- -d ' ' | sort | uniq
GIT?=git
NO_CHANGE_CHECKOUT_DIR=no-change-check-freertos-kernel
NO_CHANGE_EXPECTED_HASH=4a61f9ff7e2
.PHONY: synced_with_source_check
synced_with_source_check:
@rm -rf $(NO_CHANGE_CHECKOUT_DIR)
@$(GIT) clone --shallow-since="30-06-2020" https://github.com/FreeRTOS/FreeRTOS-Kernel.git $(NO_CHANGE_CHECKOUT_DIR)
@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) queue.c
@cd $(NO_CHANGE_CHECKOUT_DIR) && $(GIT) diff --quiet $(NO_CHANGE_EXPECTED_HASH) include/queue.h

@@ -0,0 +1,122 @@
# FreeRTOS VeriFast proofs
This directory contains automated functional correctness proofs of the FreeRTOS
kernel queue data structure. The proofs are checked with the
[VeriFast](https://github.com/verifast/verifast) verifier.
## Proof directory structure
We split proofs by data structure into separate proof directories. Each file
within a proof directory is a proof of one or more related API functions. A
proof is the source code of the FreeRTOS implementation with VeriFast
annotations (denoted by special comments `/*@ ... @*/`). A set of common
predicates, functions and lemmas used by all proofs is maintained in the
`include/proof` directory.
- `queue`: proofs for the FreeRTOS queue data structure
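For illustration, a VeriFast contract is written in these special comments between a function's header and its body. The following is a minimal hypothetical sketch (the `add_one` function and its contract are made up for illustration and are not part of the FreeRTOS proofs):
```
/* Hypothetical example: the precondition bounds x so the addition cannot
   overflow; the postcondition states the functional result. */
int add_one(int x)
/*@requires 0 <= x &*& x < 100;@*/
/*@ensures result == x + 1;@*/
{
    return x + 1;
}
```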
The following figure gives the callgraph of the queue proofs. Green=Proven
functions, Blue=Functions modeled by lock invariants (underlying implementation
assumed to provide these atomicity guarantees), Grey=Assumed stubs.
![Queue proof callgraph](docs/callgraph.png?raw=true "Queue proofs")
## Prerequisites
Proof checking requires VeriFast; running the proof regression additionally requires make and perl.
We recommend installing VeriFast from the nightly builds on the [VeriFast
GitHub page](https://github.com/verifast/verifast). After unpacking the build
tarball, the `verifast` and `vfide` binaries will be in the directory
`verifast-COMMIT/bin/` where `COMMIT` is the Git commit of the VeriFast build.
Note that for CI we use [VeriFast 19.12](https://github.com/verifast/verifast/releases).
## Proof checking a single proof in the VeriFast IDE
To load a proof in the `vfide` VeriFast IDE:
```
$ /path/to/vfide -I include queue/xQueueGenericSend.c
```
Then click `Verify` and `Verify Program` (or press F5). Note that the following
proofs require arithmetic overflow checking to be turned off (click `Verify`
and uncheck `Check arithmetic overflow`).
- `queue/prvCopyDataToQueue.c`
- `queue/xQueueGenericSendFromISR.c`
- `queue/xQueueReceiveFromISR.c`
A successful proof results in the top banner turning green with a statement
similar to: `0 errors found (286 statements verified)`.
## Proof checking a single proof at the command-line
The following is an example of checking a proof using the `verifast`
command-line tool. Turn off arithmetic overflow checking where necessary with
the flag `-disable_overflow_check`.
```
$ /path/to/verifast -I include -c queue/xQueueGenericSend.c
```
A successful proof results in output similar to:
```
queue/xQueueGenericSend.c
0 errors found (286 statements verified)
```
## Running proof regression
The following will check all proofs in the repo along with a statement coverage
regression.
```
$ VERIFAST=/path/to/verifast make
```
If you have made changes to the proofs and the statement coverage no longer
matches, you can temporarily turn off coverage checking:
```
$ VERIFAST=/path/to/verifast NO_COVERAGE=1 make
```
## Annotation burden
VeriFast can emit statistics about the number of source code lines and
annotations.
```
$ VERIFAST=/path/to/verifast ./scripts/annotation_overhead.sh
```
## Reading a VeriFast proof
VeriFast is a modular deductive verifier using separation logic. A quick
introduction is given by [Jacobs and
Piessens](https://people.cs.kuleuven.be/~bart.jacobs/verifast/verifast.pdf).
In particular, Section 1 Introduction, gives a high-level overview of the proof
technique, which uses forward symbolic execution over a symbolic heap.
Learning how to use VeriFast will help you read and understand the proofs.
The VeriFast
[tutorial](https://people.cs.kuleuven.be/~bart.jacobs/verifast/tutorial.pdf) is
a good guide. You will need to understand:
- Sec 4. Functions and Contracts
- Sec 5. Patterns
- Sec 6. Predicates
- Sec 7. Recursive Predicates
- Sec 8. Loops
- Sec 9. Inductive Datatypes
- Sec 10. Fixpoint Functions
- Sec 11. Lemmas
## Contributors
We acknowledge and thank the following contributors, listed alphabetically:
- Bart Jacobs, KU Leuven, https://people.cs.kuleuven.be/~bart.jacobs/

Binary file not shown.


@@ -0,0 +1,539 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef COMMON_H
#define COMMON_H
#include <listex.gh>
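/* rotate_left(n, xs) rotates xs to the left by n positions: the first n
   elements move to the back, e.g. rotate_left(1, {a, b, c}) == {b, c, a}.
   The queue proofs use it to model the wraparound of the ring-buffer storage. */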
fixpoint list<t> rotate_left<t>(int n, list<t> xs) {
return append(drop(n, xs), take(n, xs));
}
fixpoint list<t> singleton<t>(t x) {
return cons(x, nil);
}
lemma void note(bool b)
requires b;
ensures b;
{}
lemma_auto void rotate_length<t>(int n, list<t> xs)
requires 0 <= n && n <= length(xs);
ensures length(rotate_left(n, xs)) == length(xs);
{}
lemma void take_length_eq<t>(int k, list<t> xs, list<t> ys)
requires 0 <= k && k <= length(xs) && take(k, xs) == ys;
ensures length(ys) == k;
{}
lemma void leq_bound(int x, int b)
requires b <= x && x <= b;
ensures x == b;
{}
lemma void mul_mono_l_strict(int x, int y, int n)
requires 0 < n &*& x < y;
ensures x * n < y * n;
{
for (int i = 1; i < n; i++)
invariant i <= n &*& x * i < y * i;
decreases n - i;
{}
}
lemma void div_leq(int x, int y, int n)
requires 0 < n && x * n <= y * n;
ensures x <= y;
{
assert x * n <= y * n;
if (x <= y) {
mul_mono_l(x,y,n);
} else {
mul_mono_l_strict(y,x,n); //< contradiction
}
}
lemma void div_lt(int x, int y, int n)
requires 0 < n && x * n < y * n;
ensures x < y;
{
assert x * n <= y * n;
if (x == y) {
} else if (x <= y) {
mul_mono_l(x,y,n);
} else {
assert y < x;
mul_mono_l(y,x,n); //< contradiction
}
}
lemma_auto void mod_same(int n)
requires 0 < n;
ensures n % n == 0;
{
div_rem_nonneg(n, n);
if (n / n < 1) {} else if (n / n > 1) {
mul_mono_l(2, n/n, n);
} else {}
}
lemma void mod_lt(int x, int n)
requires 0 <= x && x < n;
ensures x % n == x;
{
div_rem_nonneg(x, n);
if (x / n > 0) {
mul_mono_l(1, x / n, n);
} else {
}
}
lemma void mod_plus_one(int x, int y, int n)
requires 0 <= y && 0 < n && x == (y % n);
ensures ((x+1) % n) == ((y+1) % n);
{
div_rem_nonneg(y, n);
div_rem_nonneg(y+1, n);
div_rem_nonneg(y%n+1, n);
if (y%n+1 < n) {
mod_lt(y%n+1, n);
assert y%n == y - y/n*n;
assert (y+1)%n == y + 1 - (y + 1)/n*n;
if ((y+1)/n > y/n) {
mul_mono_l(y/n + 1, (y+1)/n, n);
} else if ((y+1)/n < y/n) {
mul_mono_l((y+1)/n + 1, y/n, n);
}
assert y - (y+1)/n*n == y - y/n*n;
assert y+1 - (y+1)/n*n == y - y/n*n + 1;
assert (y+1)%n == y%n + 1;
} else {
assert y%n+1 == n;
assert (y%n+1)%n == 0;
if (y/n + 1 < (y+1)/n) {
mul_mono_l(y/n + 2, (y+1)/n, n);
} else if (y/n + 1 > (y+1)/n) {
mul_mono_l((y+1)/n, y/n, n);
}
assert (y+1)/n == y/n + 1;
note((y+1)/n*n == (y/n + 1)*n);
assert (y+1)%n == 0;
}
assert (y%n+1)%n == (y+1)%n;
}
lemma void mod_mul(int x, int n, int y)
requires 0 < n && 0 <= x && 0 <= y;
ensures (x*n + y)%n == y%n;
{
mul_mono_l(0, x, n);
div_rem_nonneg(x*n+y, n);
div_rem_nonneg(y, n);
if ((x*n+y)/n > x + y/n) {
mul_mono_l(x + y/n + 1, (x*n+y)/n, n);
} else if ((x*n+y)/n < x + y/n) {
mul_mono_l((x*n+y)/n + 1, x + y/n, n);
}
note((x*n + y)/n == x + y/n);
note((x*n + y)/n*n == (x + y/n)*n);
}
lemma void mod_plus_distr(int x, int y, int n)
requires 0 < n && 0 <= x && 0 <= y;
ensures ((x % n) + y) % n == (x + y) % n;
{
div_rem_nonneg(x, n);
div_rem_nonneg(x%n+y, n);
div_rem_nonneg(x+y, n);
assert x == x/n*n + x%n;
mod_mul(x/n, n, x%n + y);
}
lemma_auto void mod_mod(int x, int n)
requires 0 < n && 0 <= x;
ensures (x % n) % n == (x % n);
{
mod_plus_distr(x, 0, n);
}
lemma void mod_plus(int x, int y, int n);
requires 0 < n && 0 <= x && 0 <= y;
ensures (x + y) % n == ((x % n) + (y % n)) % n;
lemma_auto void mod_range(int x, int n)
requires 0 <= x && 0 < n;
ensures 0 <= (x % n) && (x % n) < n;
{
div_rem_nonneg(x, n);
}
lemma void drop_update_le<t>(int i, int j, t x, list<t> xs)
requires 0 <= i && i <= j && j < length(xs);
ensures drop(i, update(j, x, xs)) == update(j - i, x, drop(i, xs));
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (i == 0) {
} else {
drop_update_le(i - 1, j - 1, x, xs0);
}
}
}
lemma void drop_update_ge<t>(int i, int j, t x, list<t> xs)
requires 0 <= j && j < i && i < length(xs);
ensures drop(i, update(j, x, xs)) == drop(i, xs);
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (j == 0) {
} else {
drop_update_ge(i - 1, j - 1, x, xs0);
}
}
}
lemma void take_update_le<t>(int i, int j, t x, list<t> xs)
requires 0 <= i && i <= j;
ensures take(i, update(j, x, xs)) == take(i, xs);
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (i == 0) {
} else {
take_update_le(i - 1, j - 1, x, xs0);
}
}
}
lemma void take_update_ge<t>(int i, int j, t x, list<t> xs)
requires 0 <= j && j < i && i <= length(xs);
ensures take(i, update(j, x, xs)) == update(j, x, take(i, xs));
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (j == 0) {
} else {
take_update_ge(i - 1, j - 1, x, xs0);
}
}
}
lemma void update_eq_append<t>(int i, t x, list<t> xs)
requires 0 <= i && i < length(xs);
ensures update(i, x, xs) == append(take(i, xs), cons(x, drop(i + 1, xs)));
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (i == 0) {
} else {
update_eq_append(i - 1, x, xs0);
}
}
}
lemma void take_append_ge<t>(int n, list<t> xs, list<t> ys)
requires length(xs) <= n;
ensures take(n, append(xs, ys)) == append(xs, take(n - length(xs), ys));
{
switch (xs) {
case nil:
case cons(x0, xs0):
take_append_ge(n - 1, xs0, ys);
}
}
lemma void drop_drop<t>(int m, int n, list<t> xs)
requires 0 <= m && 0 <= n;
ensures drop(m, drop(n, xs)) == drop(m+n, xs);
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (n == 0) {} else {
drop_drop(m, n-1, xs0);
}
}
}
lemma void take_take<t>(int m, int n, list<t> xs)
requires 0 <= m && m <= n && n <= length(xs);
ensures take(m, take(n, xs)) == take(m, xs);
{
switch (xs) {
case nil:
case cons(x0, xs0):
if (m == 0) {} else {
take_take(m - 1, n - 1, xs0);
}
}
}
lemma void index_of_distinct<t>(list<t> xs, t x1, t x2)
requires mem(x1, xs) == true &*& mem(x2, xs) == true &*& x1 != x2;
ensures index_of(x1, xs) != index_of(x2, xs);
{
switch (xs) {
case nil:
case cons(h, tl):
if (h == x1 || h == x2) {
/* trivial */
} else {
index_of_distinct(tl, x1, x2);
}
}
}
lemma void remove_remove_nth<t>(list<t> xs, t x)
requires mem(x, xs) == true;
ensures remove(x, xs) == remove_nth(index_of(x, xs), xs);
{
switch (xs) {
case nil:
case cons(h, tl):
if (x == h) {
assert index_of(x, xs) == 0;
} else {
remove_remove_nth(tl, x);
}
}
}
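/* Enqueueing at the back: writing z into slot (i+k)%length(xs) of the ring
   buffer extends the abstract view ys (the k items starting at index i) by z
   at the end. */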
lemma void enq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z)
requires 0 <= k && 0 <= i && 0 < length(xs) && k < length(xs) && i < length(xs) && take(k, rotate_left(i, xs)) == ys;
ensures take(k+1, rotate_left(i, update((i+k)%length(xs), z, xs))) == append(ys, cons(z, nil));
{
int j = (i+k)%length(xs);
assert take(k, append(drop(i, xs), take(i, xs))) == ys;
if (i + k < length(xs)) {
mod_lt(i + k, length(xs));
assert j == i + k;
drop_update_le(i, i + k, z, xs);
assert drop(i, update(i + k, z, xs)) == update(k, z, drop(i, xs));
take_update_le(i, i + k, z, xs);
assert take(i, update(i + k, z, xs)) == take(i, xs);
take_append(k+1, update(k, z, drop(i, xs)), take(i, xs));
assert take(k+1, append(update(k, z, drop(i, xs)), take(i, xs))) == take(k+1, update(k, z, drop(i, xs)));
update_eq_append(k, z, drop(i, xs));
assert update(k, z, drop(i, xs)) == append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))));
take_append_ge(k+1, take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))));
assert take(k+1, append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))))) ==
append(take(k, drop(i, xs)), {z});
take_append(k, drop(i, xs), take(i, xs));
assert take(k+1, append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
assert take(k+1, update(k, z, drop(i, xs))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
assert take(k+1, append(update(k, z, drop(i, xs)), take(i, xs))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
assert take(k+1, append(drop(i, update(i + k, z, xs)), take(i, update(i + k, z, xs)))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
} else {
assert i + k < 2 * length(xs);
div_rem_nonneg(i + k, length(xs));
if ((i + k) / length(xs) > 1) {
mul_mono_l(2, (i + k) / length(xs), length(xs));
} else if ((i + k) / length(xs) < 1) {
mul_mono_l((i + k) / length(xs), 0, length(xs));
}
assert j == i + k - length(xs);
assert j < i;
drop_update_ge(i, j, z, xs);
assert drop(i, update(j, z, xs)) == drop(i, xs);
take_update_ge(i, j, z, xs);
assert update(j, z, take(i, xs)) == take(i, update(j, z, xs));
take_append_ge(k+1, drop(i, xs), take(i, update(j, z, xs)));
assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) ==
append(drop(i, xs), take(j+1, update(j, z, take(i, xs))));
update_eq_append(j, z, take(i, xs));
assert update(j, z, take(i, xs)) == append(take(j, take(i, xs)), cons(z, drop(j + 1, take(i, xs))));
take_take(j, i, xs);
assert update(j, z, take(i, xs)) == append(take(j, xs), cons(z, drop(j+1, take(i, xs))));
take_append_ge(j+1, take(j, xs), cons(z, drop(j+1, take(i, xs))));
assert append(drop(i, xs), take(j+1, update(j, z, take(i, xs)))) ==
append(drop(i, xs), append(take(j, xs), {z}));
take_append_ge(k, drop(i, xs), take(i, xs));
append_assoc(drop(i, xs), take(j, xs), {z});
assert append(drop(i, xs), append(take(j, xs), {z})) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
assert append(drop(i, xs), take(j+1, update(j, z, take(i, xs)))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
}
assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) ==
append(take(k, append(drop(i, xs), take(i, xs))), {z});
assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) == append(ys, {z});
}
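/* Enqueueing at the front: writing z into slot i, which becomes the new read
   position, prepends z to the abstract view ys (the k items starting at index
   (i+1)%length(xs)). */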
lemma void front_enq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z)
requires 0 < length(xs) && 0 <= k && k < length(xs) && 0 <= i && i < length(xs) && take(k, rotate_left((i+1)%length(xs), xs)) == ys;
ensures take(k+1, rotate_left(i, update(i, z, xs))) == cons(z, ys);
{
int n = length(xs);
if (i+1 < n) {
mod_lt(i+1, n);
assert i < n;
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs))));
drop_update_le(i, i, z, xs);
take_update_le(i, i, z, xs);
assert take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs)))) ==
take(k+1, append(update(0, z, drop(i, xs)), take(i, xs)));
update_eq_append(0, z, drop(i, xs));
assert update(0, z, drop(i, xs)) == cons(z, drop(1, drop(i, xs)));
drop_drop(1, i, xs);
assert take(k+1, append(update(0, z, drop(i, xs)), take(i, xs))) ==
take(k+1, append(cons(z, drop(i+1, xs)), take(i, xs)));
assert take(k+1, append(cons(z, drop(i+1, xs)), take(i, xs))) ==
cons(z, take(k, append(drop(i+1, xs), take(i, xs))));
assert ys == take(k, rotate_left(i+1, xs));
assert ys == take(k, append(drop(i+1, xs), take(i+1, xs)));
if (k <= length(drop(i+1, xs))) {
take_append(k, drop(i+1, xs), take(i+1, xs));
take_append(k, drop(i+1, xs), take(i, xs));
} else {
take_append_ge(k, drop(i+1, xs), take(i+1, xs));
take_append_ge(k, drop(i+1, xs), take(i, xs));
assert (i+1) + k < 2 * n;
div_rem_nonneg((i+1) + k, n);
if (((i+1) + k) / n > 1) {
mul_mono_l(2, ((i+1) + k) / n, n);
} else if (((i+1) + k) / n < 1) {
mul_mono_l(((i+1) + k) / n, 0, n);
}
int j = ((i+1)+k)%n;
assert j <= i;
int l = length(drop(i+1, xs));
assert l == n - i - 1;
take_take(k - l, i + 1, xs);
take_take(k - l, i, xs);
}
} else {
assert i == (n-1);
assert (i + 1) % n == 0;
drop_update_le(i, i, z, xs);
update_eq_append(0, z, xs);
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs))));
drop_update_le(i, i, z, xs);
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
take(k+1, append(update(0, z, drop(i, xs)), take(i, update(i, z, xs))));
update_eq_append(0, z, drop(i, xs));
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
take(k+1, append(cons(z, drop(1, drop(i, xs))), take(i, update(i, z, xs))));
drop_drop(1, i, xs);
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
take(k+1, append(cons(z, nil), take(i, update(i, z, xs))));
take_update_le(i, i, z, xs);
assert take(k+1, rotate_left(i, update(i, z, xs))) ==
cons(z, take(k, take(i, xs)));
take_take(k, i, xs);
assert take(k+1, rotate_left(i, update(i, z, xs))) == cons(z, ys);
}
}
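/* Dequeueing: advancing the read position from i to (i+1)%length(xs) drops the
   head of the abstract view ys, leaving tail(ys). */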
lemma void deq_lemma<t>(int k, int i, list<t> xs, list<t> ys, t z)
requires 0 < k && k <= length(xs) && 0 <= i && i < length(xs) && take(k, rotate_left(i, xs)) == ys && z == head(ys);
ensures take(k-1, rotate_left((i+1)%length(xs), xs)) == tail(ys);
{
int j = (i+1)%length(xs);
drop_n_plus_one(i, xs);
assert tail(take(k, append(drop(i, xs), take(i, xs)))) == take(k-1, append(drop(i+1, xs), take(i, xs)));
if (i+1 < length(xs)) {
mod_lt(i+1, length(xs));
assert j == i+1;
if (k-1 <= length(xs)-j) {
take_append(k-1, drop(j, xs), take(j, xs));
take_append(k-1, drop(j, xs), take(i, xs));
} else {
assert k+i > length(xs);
take_append_ge(k-1, drop(j, xs), take(j, xs));
take_append_ge(k-1, drop(j, xs), take(i, xs));
assert k-1-(length(xs)-j) == k+i-length(xs);
assert k+i-length(xs) <= i;
take_take(k+i-length(xs), j, xs);
take_take(k+i-length(xs), i, xs);
assert take(k+i-length(xs), take(j, xs)) == take(k+i-length(xs), take(i, xs));
}
} else {
assert i+1 == length(xs);
assert (i+1)%length(xs) == 0;
assert j == 0;
assert append(drop(j, xs), take(j, xs)) == xs;
assert append(drop(i+1, xs), take(i, xs)) == take(i, xs);
take_append_ge(k-1, drop(i+1, xs), take(i, xs));
take_take(k-1, i, xs);
}
assert take(k-1, append(drop(j, xs), take(j, xs))) == take(k-1, append(drop(i+1, xs), take(i, xs)));
assert take(k-1, append(drop(j, xs), take(j, xs))) == tail(take(k, append(drop(i, xs), take(i, xs))));
}
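/* The concrete element at read position i is the head of the abstract view ys. */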
lemma void deq_value_lemma<t>(int k, int i, list<t> xs, list<t> ys)
requires 0 < k && k <= length(ys) && 0 <= i && i < length(xs) && take(k, rotate_left(i, xs)) == ys;
ensures nth(i, xs) == head(ys);
{
drop_n_plus_one(i, xs);
assert nth(i, xs) == head(take(k, append(drop(i, xs), take(i, xs))));
}
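/* Splitting xs at index i into take(i, xs), nth(i, xs) and drop(i+1, xs) and
   appending the pieces yields xs again; used to stitch the buffer back
   together after a read that leaves the element unchanged. */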
lemma void combine_list_no_change<t>(list<t>prefix, t x, list<t>suffix, int i, list<t> xs)
requires 0 <= i && i < length(xs) && prefix == take(i, xs) && x == nth(i, xs) && suffix == drop(i+1, xs);
ensures xs == append(prefix, cons(x, suffix));
{
drop_n_plus_one(i, xs);
}
/* Following lemma from `verifast/examples/vstte2010/problem4.java`. */
lemma void update_rewrite<t>(list<t> vs, t v, int pos)
requires 0 <= pos && pos < length(vs);
ensures update(pos, v, vs) == append(take(pos, vs), cons(v, (drop(pos+1, vs))));
{
switch(vs) {
case nil:
case cons(h, t):
if(pos == 0) {
} else {
update_rewrite(t, v, pos - 1);
}
}
}
lemma void combine_list_update<t>(list<t>prefix, t x, list<t>suffix, int i, list<t> xs)
requires 0 <= i && i < length(xs) && prefix == take(i, xs) && suffix == drop(i+1, xs);
ensures update(i, x, xs) == append(prefix, cons(x, suffix));
{
update_rewrite(xs, x, i);
}
#endif /* COMMON_H */

@@ -0,0 +1,435 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef QUEUE_H
#define QUEUE_H
#define VERIFAST
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <threading.h>
/*@#include "common.gh"@*/
typedef size_t TickType_t;
typedef size_t UBaseType_t;
typedef ssize_t BaseType_t;
/* Empty/no-op macros */
/* Tracing */
#define traceBLOCKING_ON_QUEUE_PEEK(x)
#define traceBLOCKING_ON_QUEUE_RECEIVE(x)
#define traceBLOCKING_ON_QUEUE_SEND(x)
#define traceQUEUE_CREATE(x)
#define traceQUEUE_CREATE_FAILED(x)
#define traceQUEUE_DELETE(x)
#define traceQUEUE_PEEK(x)
#define traceQUEUE_PEEK_FAILED(x)
#define traceQUEUE_PEEK_FROM_ISR(x)
#define traceQUEUE_PEEK_FROM_ISR_FAILED(x)
#define traceQUEUE_RECEIVE(x)
#define traceQUEUE_RECEIVE_FAILED(x)
#define traceQUEUE_RECEIVE_FROM_ISR(x)
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(x)
#define traceQUEUE_SEND(x)
#define traceQUEUE_SEND_FAILED(x)
#define traceQUEUE_SEND_FROM_ISR(x)
#define traceQUEUE_SEND_FROM_ISR_FAILED(x)
/* Coverage */
#define mtCOVERAGE_TEST_MARKER()
/* Asserts */
#define configASSERT(x)
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
/* Map portable memory management functions */
#define pvPortMalloc malloc
#define vPortFree free
#define queueSEND_TO_BACK 0
#define queueSEND_TO_FRONT 1
#define queueOVERWRITE 2
#define pdTRUE 1
#define pdFALSE 0
#define pdPASS pdTRUE
#define pdFAIL pdFALSE
#define errQUEUE_FULL 0
#define errQUEUE_EMPTY 0
#define queueUNLOCKED ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
#define queueINT8_MAX ( ( int8_t ) 127 )
typedef struct QueuePointers
{
int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. One more byte is allocated than necessary to store the queue items; this is used as a marker. */
int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;
typedef struct SemaphoreData
{
#ifdef VERIFAST /*< do not model xMutexHolder */
void *xMutexHolder;
#else
TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */
#endif
UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
/* VeriFast does not support unions so we replace with a struct */
struct fake_union_t {
QueuePointers_t xQueue;
SemaphoreData_t xSemaphore;
};
typedef struct xLIST {
UBaseType_t uxNumberOfItems;
#ifndef VERIFAST /*< do not model pxIndex and xListEnd of xLIST struct */
struct xLIST_ITEM *pxIndex;
MiniListItem_t xListEnd;
#endif
} List_t;
typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ {
int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
int8_t *pcWriteTo; /*< Points to the next free place in the storage area. */
#ifdef VERIFAST /*< VeriFast does not model unions */
struct fake_union_t u;
#else
union
{
QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */
SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
} u;
#endif
List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
UBaseType_t uxItemSize; /*< The size of each item that the queue will hold, not the number of bytes in total. */
volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
/*@struct mutex *irqMask;@*/ /*< Ghost mutex simulates the effect of irq masking */
/*@struct mutex *schedulerSuspend;@*/ /*< Ghost mutex simulates the effect of scheduler suspension */
/*@struct mutex *locked;@*/ /*< Ghost mutex simulates the effect of queue locking */
} xQUEUE;
typedef xQUEUE Queue_t;
typedef struct QueueDefinition * QueueHandle_t;
/*@
#define QUEUE_SHAPE(q, Storage, N, M, K) \
malloc_block_QueueDefinition(q) &*& \
q->pcHead |-> Storage &*& \
q->pcWriteTo |-> ?WPtr &*& \
q->u.xQueue.pcTail |-> ?End &*& \
q->u.xQueue.pcReadFrom |-> ?RPtr &*& \
q->uxItemSize |-> M &*& \
q->uxLength |-> N &*& \
q->uxMessagesWaiting |-> K &*& \
q->cRxLock |-> ?rxLock &*& \
q->cTxLock |-> ?txLock &*& \
struct_QueuePointers_padding(&q->u.xQueue) &*& \
struct_SemaphoreData_padding(&q->u.xSemaphore) &*& \
struct_fake_union_t_padding(&q->u) &*& \
struct_xLIST_padding(&q->xTasksWaitingToSend) &*& \
struct_xLIST_padding(&q->xTasksWaitingToReceive) &*& \
q->u.xSemaphore.xMutexHolder |-> _ &*& \
q->u.xSemaphore.uxRecursiveCallCount |-> _ &*& \
true
predicate queue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + 1 + K) % N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left((R+1)%N, contents)) &*&
malloc_block(Storage, N*M) &*&
true
;
predicate buffer(char *buffer, size_t N, size_t M; list<list<char> > elements) =
N == 0
? elements == nil
: chars(buffer, M, ?x) &*& buffer(buffer + M, N - 1, M, ?xs) &*& elements == cons(x, xs);
// TODO: buffer_from_chars proof
lemma void buffer_from_chars(char *buffer, size_t N, size_t M);
requires chars(buffer, N*M, _);
ensures exists<list<list<char> > >(?elements) &*& buffer(buffer, N, M, elements) &*& length(elements) == N;
// TODO: split_element proof
lemma void split_element<t>(char *buffer, size_t N, size_t M, size_t i);
requires buffer(buffer, N, M, ?elements) &*& i < N;
ensures
buffer(buffer, i, M, take(i, elements)) &*&
chars(buffer + i * M, M, nth(i, elements)) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, drop(i+1, elements));
// TODO: join_element proof
lemma void join_element(char *buffer, size_t N, size_t M, size_t i);
requires
buffer(buffer, i, M, ?prefix) &*&
chars(buffer + i * M, M, ?element) &*&
buffer(buffer + (i + 1) * M, (N-1-i), M, ?suffix);
ensures buffer(buffer, N, M, append(prefix, cons(element, suffix)));
predicate list(List_t *l;) =
l->uxNumberOfItems |-> _;
predicate queuelists(QueueHandle_t q;) =
list(&q->xTasksWaitingToSend) &*&
list(&q->xTasksWaitingToReceive);
@*/
/* Because prvCopyDataFromQueue does *not* decrement uxMessagesWaiting (K) the
queue predicate above does not hold as a postcondition. If the caller
subsequently decrements K then the queue predicate can be reinstated. */
/*@
predicate queue_after_prvCopyDataFromQueue(QueueHandle_t q, int8_t *Storage, size_t N, size_t M, size_t W, size_t R, size_t K, bool is_locked; list<list<char> >abs) =
QUEUE_SHAPE(q, Storage, N, M, K) &*&
0 < N &*&
0 < M &*&
0 <= W &*& W < N &*&
0 <= R &*& R < N &*&
0 <= K &*& K <= N &*&
W == (R + K) % N &*& //< Differs from queue predicate
(-1) <= rxLock &*&
(-1) <= txLock &*&
(is_locked ? 0 <= rxLock : (-1) == rxLock) &*&
(is_locked ? 0 <= txLock : (-1) == txLock) &*&
WPtr == Storage + (W*M) &*&
RPtr == Storage + (R*M) &*&
End == Storage + (N*M) &*&
buffer(Storage, N, M, ?contents) &*&
length(contents) == N &*&
abs == take(K, rotate_left(R, contents)) &*& //< Differs from queue predicate
malloc_block(Storage, N*M) &*&
true
;
@*/
/* Can't be called `mutex` as this clashes with VeriFast's predicate */
/*@
predicate freertos_mutex(QueueHandle_t q, int8_t *Storage, size_t N, size_t K;) =
QUEUE_SHAPE(q, Storage, N, 0, K) &*&
queuelists(q) &*&
0 < N &*&
0 <= K &*& K <= N &*&
(-1) <= rxLock &*&
(-1) <= txLock &*&
WPtr == Storage &*&
RPtr == Storage &*&
End == Storage &*&
malloc_block(Storage, 0) &*&
chars(Storage, 0, _) &*&
true
;
@*/
/* A queuehandle can be shared between tasks and ISRs. Acquiring the ghost
`irqMask` gives access to the core queue resources. The permissions granted
after masking interrupts depends on the caller:
- A task has access to the queue and the queuelists
- An ISR has access to the queue and, if the queue is unlocked, the queuelists */
/*@
predicate queuehandle(QueueHandle_t q, size_t N, size_t M, bool is_isr;) =
q->irqMask |-> ?m &*& mutex(m, irqs_masked_invariant(q, N, M, is_isr));
predicate_ctor irqs_masked_invariant(QueueHandle_t queue, size_t N, size_t M, bool is_isr)() =
queue(queue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_isr && is_locked ? true : queuelists(queue));
@*/
/* A queuesuspend can be shared between tasks. Acquiring the ghost `schedulerSuspend` gives access to the `locked` mutex. */
/*@
predicate_ctor scheduler_suspended_invariant(QueueHandle_t queue)() =
queue->locked |-> ?m &*&
mutex(m, queue_locked_invariant(queue));
predicate queuesuspend(QueueHandle_t q;) =
q->schedulerSuspend |-> ?m &*&
mutex(m, scheduler_suspended_invariant(q));
@*/
/* A queuelock is exclusively acquired by a task. Acquiring the ghost `queuelock` gives access to the queue list resources. */
/*@
predicate queuelock(QueueHandle_t q;) =
q->locked |-> ?m &*&
mutex(m, queue_locked_invariant(q));
predicate_ctor queue_locked_invariant(QueueHandle_t queue)() =
queuelists(queue);
@*/
BaseType_t vListInitialise(List_t *list);
/*@requires list(list);@*/
/*@ensures list(list);@*/
BaseType_t listLIST_IS_EMPTY(List_t *list);
/*@requires list->uxNumberOfItems |-> ?len;@*/
/*@ensures list->uxNumberOfItems |-> len &*& result == (len == 0 ? pdTRUE : pdFALSE);@*/
typedef struct xTIME_OUT
{
BaseType_t xOverflowCount;
TickType_t xTimeOnEntering;
} TimeOut_t;
/*@
predicate xTIME_OUT(struct xTIME_OUT *to;) =
to->xOverflowCount |-> _ &*&
to->xTimeOnEntering |-> _ &*&
struct_xTIME_OUT_padding(to);
@*/
void vTaskInternalSetTimeOutState( TimeOut_t * x);
/*@requires xTIME_OUT(x);@*/
/*@ensures xTIME_OUT(x);@*/
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
/*@requires xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
/*@ensures xTIME_OUT(pxTimeOut) &*& u_integer(pxTicksToWait, _);@*/
BaseType_t xTaskRemoveFromEventList(List_t *list);
/*@requires list(list);@*/
/*@ensures list(list);@*/
void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait );
/*@requires list(pxEventList);@*/
/*@ensures list(pxEventList);@*/
void vTaskMissedYield();
/*@requires true;@*/
/*@ensures true;@*/
void vTaskSuspendAll();
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
BaseType_t xTaskResumeAll( void );
/*@requires exists<QueueHandle_t>(?xQueue) &*&
[1/2]xQueue->schedulerSuspend |-> ?m &*&
mutex_held(m, scheduler_suspended_invariant(xQueue), currentThread, 1/2) &*&
xQueue->locked |-> ?m2 &*&
mutex(m2, queue_locked_invariant(xQueue));@*/
/*@ensures [1/2]xQueue->schedulerSuspend |-> m &*&
[1/2]mutex(m, scheduler_suspended_invariant(xQueue));@*/
void prvLockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue); @*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
void prvUnlockQueue( QueueHandle_t xQueue );
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuelock(xQueue);@*/
void setInterruptMask(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue);@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
}
void clearInterruptMask(QueueHandle_t xQueue)
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, false), currentThread, 1/2) &*&
queuelists(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, false);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, false)();@*/
mutex_release(xQueue->irqMask);
/*@close [1/2]queuehandle(xQueue, N, M, false);@*/
}
#define taskENTER_CRITICAL() setInterruptMask(xQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(xQueue)
#define portYIELD_WITHIN_API()
#define queueYIELD_IF_USING_PREEMPTION()
UBaseType_t setInterruptMaskFromISR(QueueHandle_t xQueue)
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true;@*/
/*@ensures [1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, is_isr), currentThread, 1/2) &*&
queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(is_locked ? true : queuelists(xQueue));@*/
{
/*@open queuehandle(xQueue, N, M, is_isr);@*/
mutex_acquire(xQueue->irqMask);
/*@open irqs_masked_invariant(xQueue, N, M, is_isr)();@*/
return 0;
}
void clearInterruptMaskFromISR(QueueHandle_t xQueue, UBaseType_t uxSavedInterruptStatus)
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
[1/2]xQueue->irqMask |-> ?m &*&
mutex_held(m, irqs_masked_invariant(xQueue, N, M, true), currentThread, 1/2) &*&
(is_locked ? true : queuelists(xQueue));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, true);@*/
{
/*@close irqs_masked_invariant(xQueue, N, M, true)();@*/
mutex_release(xQueue->irqMask);
/*@close [1/2]queuehandle(xQueue, N, M, true);@*/
}
#define portSET_INTERRUPT_MASK_FROM_ISR() setInterruptMaskFromISR(xQueue)
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus) clearInterruptMaskFromISR(xQueue, uxSavedInterruptStatus)
#endif /* QUEUE_H */

@@ -0,0 +1,57 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef QUEUECONTRACTS_H
#define QUEUECONTRACTS_H
#include "queue.h"
void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/
BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition );
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
(xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT
? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*&
chars(pvItemToQueue, M, x);@*/
BaseType_t prvIsQueueEmpty( Queue_t * pxQueue );
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
BaseType_t prvIsQueueFull( Queue_t * pxQueue );
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
#endif /* QUEUECONTRACTS_H */

@@ -0,0 +1,25 @@
# FreeRTOS queue proofs
In the queue predicates and proofs we use the following variable names.
- `N` : the queue length (i.e., the maximum number of items the queue can
store)
- `M` : the size in bytes of each element
- `W` : the logical index of the write pointer, necessarily between `0..(N-1)`
- `R` : the logical index of the read pointer, necessarily between `0..(N-1)`
- `K` : the number of items currently in the queue
Consequently, the size of the concrete queue storage is `N*M` bytes. The
`buffer` predicate, defined in `include/proof/queue.h`, allows us to treat the
queue storage as a list `contents` of `N` items, each of which is `M` bytes.
The `queue` predicate, defined in `include/proof/queue.h`, relates the concrete
representation to an abstract list `abs` of `K` items. More precisely, the main
queue invariant is:
```
abs == take(K, rotate_left((R+1)%N, contents))
```
where `(R+1)%N` is the front of the queue, `rotate_left` allows for the
wraparound of queue storage, and `take` gives the first `K` elements.
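As a worked instance of this invariant, consider a hypothetical queue with `N = 4`, `M = 1`, `R = 2` and `K = 2` (so `W == (R+1+K)%N == 1`); the element names are made up for illustration:
```
contents                 == {a, b, c, d}
(R+1)%N                  == 3                          (front of the queue)
rotate_left(3, contents) == {d, a, b, c}
abs                      == take(2, {d, a, b, c}) == {d, a}
```
That is, the two items currently in the queue occupy storage slots 3 and 0, and the next write goes to slot `W == 1`.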

@@ -0,0 +1,258 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
/* Simplifying assumption: we do not verify queue initialisation in a
concurrent environment. We assume the queue initialization (including reset)
happens-before all concurrent send/receives. */
#ifdef VERIFAST /*< ***xQueueGenericReset happens-before concurrent behavior*** */
#define taskENTER_CRITICAL()
#define taskEXIT_CRITICAL()
#endif
/* The following intermediate queue predicates summarise states used by queue
initialization but not used elsewhere so we confine them to these proofs
locally. */
/*@
predicate queue_init1(QueueHandle_t q;) =
QUEUE_SHAPE(q, _, _, _, _) &*&
queuelists(q)
;
predicate queue_init2(QueueHandle_t q, int8_t *Storage, size_t N, size_t M;) =
QUEUE_SHAPE(q, Storage, N, M, _) &*&
queuelists(q) &*&
0 < N &*&
chars(Storage, (N*M), _) &*&
malloc_block(Storage, N*M) &*&
Storage + N * M <= (int8_t *)UINTPTR_MAX &*&
true
;
@*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
/*@requires queue_init2(xQueue, ?Storage, ?N, ?M);@*/
/*@ensures 0 == M
? freertos_mutex(xQueue, Storage, N, 0)
: queue(xQueue, Storage, N, M, 0, (N-1), 0, false, nil) &*& queuelists(xQueue);@*/
{
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
taskENTER_CRITICAL();
{
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxQueue->pcWriteTo = pxQueue->pcHead;
/*@mul_mono_l(0, N-1, M);@*/
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->cRxLock = queueUNLOCKED;
pxQueue->cTxLock = queueUNLOCKED;
if( xNewQueue == pdFALSE )
{
/* If there are tasks blocked waiting to read from the queue, then
the tasks will remain blocked as after this function exits the queue
will still be empty. If there are tasks blocked waiting to write to
the queue, then one should be unblocked as after this function exits
it will be possible to write to it. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Ensure the event queues start in the correct state. */
vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
}
/* Logically, we move from a flat character array of `N*M` bytes (using
the `chars` predicate) to an array of `N` elements where each element
is `M` bytes (using the `buffer` predicate) */
/*@if (M != 0) { buffer_from_chars(pxQueue->pcHead, N, M); }@*/
}
taskEXIT_CRITICAL();
/* A value is returned for calling semantic consistency with previous
versions. */
return pdPASS;
}
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
/*@requires queue_init1(pxNewQueue) &*&
0 < uxQueueLength &*& 0 < uxItemSize &*&
malloc_block(pucQueueStorage, uxQueueLength * uxItemSize) &*&
pucQueueStorage + uxQueueLength * uxItemSize <= (uint8_t *)UINTPTR_MAX &*&
uchars(pucQueueStorage, uxQueueLength * uxItemSize,_);@*/
/*@ensures queue(pxNewQueue, ((int8_t *)(void *)pucQueueStorage), uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(pxNewQueue);@*/
{
#ifndef VERIFAST /*< void cast of unused var */
/* Remove compiler warnings about unused parameters should
configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType;
#endif
if( uxItemSize == ( UBaseType_t ) 0 )
{
/* No RAM was allocated for the queue storage area, but pcHead cannot
be set to NULL because NULL is used as a key to say the queue is used as
a mutex. Therefore just set pcHead to point to the queue as a benign
value that is known to be within the memory map. */
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pxNewQueue;
#else
pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
#endif
}
else
{
/* Set the head to the start of the queue storage area. */
#ifdef VERIFAST /*< stricter casting */
pxNewQueue->pcHead = ( int8_t * ) ( void * ) pucQueueStorage;
#else
pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
#endif
}
/* Initialise the queue members as described where the queue type is
defined. */
pxNewQueue->uxLength = uxQueueLength;
pxNewQueue->uxItemSize = uxItemSize;
/*@close queue_init2(pxNewQueue, _, uxQueueLength, uxItemSize);@*/
#ifdef VERIFAST /*< void cast of unused return value */
xQueueGenericReset( pxNewQueue, pdTRUE );
#else
( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
{
pxNewQueue->ucQueueType = ucQueueType;
}
#endif /* configUSE_TRACE_FACILITY */
#if( configUSE_QUEUE_SETS == 1 )
{
pxNewQueue->pxQueueSetContainer = NULL;
}
#endif /* configUSE_QUEUE_SETS */
traceQUEUE_CREATE( pxNewQueue );
}
QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
/*@requires 0 < uxQueueLength &*&
0 < uxItemSize &*&
0 < uxQueueLength * uxItemSize &*&
uxQueueLength * uxItemSize <= UINT_MAX;@*/
/*@ensures result == NULL
? true
: queue(result, _, uxQueueLength, uxItemSize, 0, (uxQueueLength-1), 0, false, nil) &*&
queuelists(result) &*&
result->irqMask |-> _ &*&
result->schedulerSuspend |-> _ &*&
result->locked |-> _;@*/
{
Queue_t *pxNewQueue;
size_t xQueueSizeInBytes;
uint8_t *pucQueueStorage;
configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
/* Allocate enough space to hold the maximum number of items that
can be in the queue at any time. It is valid for uxItemSize to be
zero in the case the queue is used as a semaphore. */
xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
/* Check for multiplication overflow. */
configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
#else
/* Allocate the queue and storage area. Justification for MISRA
deviation as follows: pvPortMalloc() always ensures returned memory
blocks are aligned per the requirements of the MCU stack. In this case
pvPortMalloc() must return a pointer that is guaranteed to meet the
alignment requirements of the Queue_t structure - which in this case
is an int8_t *. Therefore, whenever the stack alignment requirements
are greater than or equal to the pointer to char requirements the cast
is safe. In other cases alignment requirements are not strict (one or
two bytes). */
pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
#endif
if( pxNewQueue != NULL )
{
#ifdef VERIFAST /*< ***model single malloc of struct and buffer*** */
pucQueueStorage = ( uint8_t * ) pvPortMalloc( xQueueSizeInBytes );
if ( pucQueueStorage == NULL ) {
vPortFree( pxNewQueue );
return NULL;
}
/*@malloc_block_limits(pucQueueStorage);@*/
#else
/* Jump past the queue structure to find the location of the queue
storage area. */
pucQueueStorage = ( uint8_t * ) pxNewQueue;
pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
#endif
#if( configSUPPORT_STATIC_ALLOCATION == 1 )
{
/* Queues can be created either statically or dynamically, so
note this task was created dynamically in case it is later
deleted. */
pxNewQueue->ucStaticallyAllocated = pdFALSE;
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
}
else
{
traceQUEUE_CREATE_FAILED( ucQueueType );
mtCOVERAGE_TEST_MARKER();
}
return pxNewQueue;
}

@@ -0,0 +1,85 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*& 0 < K &*& chars(pvBuffer, M, _);@*/
/*@ensures queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs) &*&
chars(pvBuffer, M, head(abs));@*/
{
if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
{
/*@assert buffer(Storage, N, M, ?contents);@*/
/*@mul_mono_l(R, N-1, M);@*/
pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
{
/*@div_leq(N, R+1, M);@*/ // now we know R == N-1
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
}
else
{
/*@{
div_lt(R+1, N, M); // now we know R+1 < N
mod_lt(R+1, N); // so, R+1 == (R+1)%N
note(pxQueue->u.xQueue.pcReadFrom == Storage + ((R + 1) * M));
note( Storage + ((R + 1) * M) == Storage + (((R + 1) % N) * M));
}@*/
mtCOVERAGE_TEST_MARKER();
}
/*@mod_plus(R+1, K, N);@*/
/*@mod_mod(R+1, N);@*/
/*@split_element(Storage, N, M, (R+1)%N);@*/
/*@assert
buffer(Storage, (R+1)%N, M, ?prefix) &*&
chars(Storage + ((R+1)%N) * M, M, ?element) &*&
buffer(Storage + ((R+1)%N + 1) * M, (N-1-(R+1)%N), M, ?suffix);@*/
#ifdef VERIFAST /*< void cast of unused return value */
memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
#else
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif
/*@{
combine_list_no_change(prefix, element, suffix, (R+1)%N, contents);
join_element(Storage, N, M, (R+1)%N);
length_take(K, contents);
take_length_eq(K, rotate_left((R+1)%N, contents), abs);
deq_value_lemma(K, (R+1)%N, contents, abs);
}@*/
}
}
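/* As described in the proof header (include/proof/queue.h), the full queue
   predicate can be reinstated once the caller decrements uxMessagesWaiting;
   this client function demonstrates that step. */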
void caller_reinstates_queue_predicate(Queue_t *const pxQueue, void *const pvBuffer)
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
0 < K &*&
chars(pvBuffer, M, _);@*/
/*@ensures
queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs)) &*&
chars(pvBuffer, M, head(abs));@*/
{
prvCopyDataFromQueue(pxQueue, pvBuffer);
/*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
/*@assert buffer(Storage, N, M, ?contents);@*/
pxQueue->uxMessagesWaiting = pxQueue->uxMessagesWaiting - 1;
/*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
}

@@ -0,0 +1,179 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
/*@requires queue(pxQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
(K < N || xPosition == queueOVERWRITE) &*&
chars(pvItemToQueue, M, ?x) &*&
(xPosition == queueSEND_TO_BACK || xPosition == queueSEND_TO_FRONT || (xPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
(xPosition == queueSEND_TO_BACK
? queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)))
: (xPosition == queueSEND_TO_FRONT
? (R == 0
? queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs))
: queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs)))
: xPosition == queueOVERWRITE &*& queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x)))
) &*&
chars(pvItemToQueue, M, x);@*/
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;
/* This function is called from a critical section. */
uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* The abstract list of lists of chars in `Storage` is `contents` */
/*@assert buffer(Storage, N, M, ?contents);@*/
if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
{
/* This case is unreachable for queues */
/*@assert false;@*/
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
/* The mutex is no longer being held. */
xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
pxQueue->u.xSemaphore.xMutexHolder = NULL;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_MUTEXES */
}
else if( xPosition == queueSEND_TO_BACK )
{
#ifdef VERIFAST /*< void cast of unused return value */
/* Now we focus the proof on the logical element of the buffer that
will be updated, using the following lemma to split the buffer into 3
parts: a prefix, the element we want to update, and the suffix. This
enables verification of the subsequent memcpy. */
/*@split_element(Storage, N, M, W);@*/
/*@assert
buffer(Storage, W, M, ?prefix) &*&
chars(Storage + W * M, M, _) &*&
buffer(Storage + (W + 1) * M, (N-1-W), M, ?suffix);@*/
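/* Worked instance (hypothetical values): for N == 4, M == 2, W == 1 the
lemma yields prefix == the first slot, a 2-byte chars() chunk at
Storage + 1*M for the write, and suffix == the remaining two slots, so the
memcpy below only updates memory covered by that middle chunk. */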
memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/* After the update we stitch the buffer back together */
/*@join_element(Storage, N, M, W);@*/
/*@combine_list_update(prefix, x, suffix, W, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
#endif
pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
{
/*@div_leq(N, W+1, M);@*/ // now we know W == N-1
pxQueue->pcWriteTo = pxQueue->pcHead;
}
else
{
/*@{
div_lt(W+1, N, M); // now we know W+1 < N
mod_lt(W+1, N); // so, W+1 == (W+1)%N
note(pxQueue->pcWriteTo == Storage + ((W + 1) * M));
note( Storage + ((W + 1) * M) == Storage + (((W + 1) % N) * M));
}@*/
mtCOVERAGE_TEST_MARKER();
}
}
else
{
#ifdef VERIFAST /*< void cast of unused return value */
/*@split_element(Storage, N, M, R);@*/
/*@assert
buffer(Storage, R, M, ?prefix) &*&
chars(Storage + R * M, M, _) &*&
buffer(Storage + (R + 1) * M, (N-1-R), M, ?suffix);@*/
memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
/*@join_element(Storage, N, M, R);@*/
/*@combine_list_update(prefix, x, suffix, R, contents);@*/
#else
( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. Assert checks null pointer only used when length is 0. */
#endif
pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
{
pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
/*@{
div_leq(R-1, 0, M);
leq_bound(R, 0);
}@*/
/*@assert R == 0;@*/
/*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (N-1) * M;@*/
}
else
{
/*@assert 0 < R;@*/
/*@assert pxQueue->u.xQueue.pcReadFrom == Storage + (R-1) * M;@*/
mtCOVERAGE_TEST_MARKER();
}
/*@
if (R == 0) {
mod_plus(N, (K+1), N); mod_same(N); mod_mod(K+1, N);
assert W == ((N-1) + 1 + (K+1)) % N;
}
@*/
if( xPosition == queueOVERWRITE )
{
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* An item is not being added but overwritten, so subtract
one from the recorded number of items in the queue so that,
when one is added again below, the recorded number of items
remains correct. */
--uxMessagesWaiting;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
/*@
if (xPosition == queueSEND_TO_BACK) {
enq_lemma(K, (R+1)%N, contents, abs, x);
mod_plus_one(W, R + 1 + K, N);
mod_plus_distr(R+1, K, N);
} else if (xPosition == queueSEND_TO_FRONT) {
front_enq_lemma(K, R, contents, abs, x);
if (0 < R) {
mod_lt(R, N);
}
}
@*/
return xReturn;
}
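/* Worked example of the contract above (illustration only): for a queue with
N == 3, M == 1, W == 2, R == 0, K == 2 and abs == {a, b}, sending x to the
back yields W == 0, K == 3 and abs == {a, b, x}; sending x to the front
yields R == 2 (the N-1 case), K == 3 and abs == {x, a, b}; with N == 1,
overwriting yields K == 1 and abs == {x}. */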

@ -0,0 +1,49 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#define taskENTER_CRITICAL() setInterruptMask(pxQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(pxQueue)
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
BaseType_t xReturn;
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
return xReturn;
}

@ -0,0 +1,50 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#define taskENTER_CRITICAL() setInterruptMask(pxQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(pxQueue)
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr);@*/
{
BaseType_t xReturn;
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
return xReturn;
}

@ -0,0 +1,67 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
/* In this case we cannot wrap the macro in a function call to give it a
function contract, because the proof requires annotations within the macro
body and VeriFast does not support annotations inside macro definitions. */
#define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL(); \
{ \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
} \
if( ( pxQueue )->cTxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
taskEXIT_CRITICAL()
void wrapper_prvLockQueue( QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuelock(xQueue);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]xQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(xQueue), currentThread, 1/2) &*&
queue_locked_invariant(xQueue)();@*/
{
taskENTER_CRITICAL();
/*@open queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
if( ( xQueue )->cRxLock == queueUNLOCKED )
{
( xQueue )->cRxLock = queueLOCKED_UNMODIFIED;
}
if( ( xQueue )->cTxLock == queueUNLOCKED )
{
( xQueue )->cTxLock = queueLOCKED_UNMODIFIED;
}
}
/*@close queue(xQueue, Storage, N, M, W, R, K, true, abs);@*/
taskEXIT_CRITICAL();
#ifdef VERIFAST /*< ghost action */
mutex_acquire(xQueue->locked);
#endif
}

@ -0,0 +1,162 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#define taskENTER_CRITICAL() setInterruptMask(pxQueue)
#define taskEXIT_CRITICAL() clearInterruptMask(pxQueue)
/* VeriFast: we make one major change. We merge the critical regions for
decrementing `cTxLock` and `cRxLock`. */
static void prvUnlockQueue( Queue_t * const pxQueue )
/*@requires [1/2]queuehandle(pxQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]pxQueue->locked |-> ?m &*&
mutex_held(m, queue_locked_invariant(pxQueue), currentThread, 1/2) &*&
queue_locked_invariant(pxQueue)();@*/
/*@ensures [1/2]queuehandle(pxQueue, N, M, is_isr) &*&
[1/2]queuelock(pxQueue);@*/
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
/* The lock counts contain the number of extra data items placed or
removed from the queue while the queue was locked. When a queue is
locked, items can be added or removed, but the event lists cannot be
updated. */
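/* For example (illustration only): if cTxLock == queueLOCKED_UNMODIFIED + 2,
two items were posted while the queue was locked, so the loop below makes at
most two attempts to unblock a task waiting to receive before cTxLock is
reset to queueUNLOCKED. */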
taskENTER_CRITICAL();
/*@open queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, _, ?abs);@*/
{
int8_t cTxLock = pxQueue->cTxLock;
/* See if data was added to the queue while it was locked. */
while( cTxLock > queueLOCKED_UNMODIFIED )
/*@invariant queuelists(pxQueue);@*/
{
/* Data was posted while the queue was locked. Are any tasks
blocked waiting for data to become available? */
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting to
the queue set caused a higher priority task to unblock.
A context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Tasks that are removed from the event list will get
added to the pending ready list as the scheduler is still
suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/* Tasks that are removed from the event list will get added to
the pending ready list as the scheduler is still suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that
a context switch is required. */
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
#endif /* configUSE_QUEUE_SETS */
--cTxLock;
}
pxQueue->cTxLock = queueUNLOCKED;
}
#ifndef VERIFAST /*< ***merge cTxLock and cRxLock critical regions*** */
taskEXIT_CRITICAL();
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
#endif
{
int8_t cRxLock = pxQueue->cRxLock;
while( cRxLock > queueLOCKED_UNMODIFIED )
/*@invariant queuelists(pxQueue);@*/
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
vTaskMissedYield();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--cRxLock;
}
else
{
break;
}
}
pxQueue->cRxLock = queueUNLOCKED;
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, false, abs);@*/
taskEXIT_CRITICAL();
#ifdef VERIFAST /*< ghost action */
mutex_release(pxQueue->locked);
#endif
}

@ -0,0 +1,68 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
/* It may seem that the read of `pxQueue->uxMessagesWaiting` is required to be
contained in a critical region to be thread-safe. However, it is impossible for
this read to be involved in a data race due to the atomicity mechanism used by
tasks and ISRs: masking and enabling interrupts. If we assume (1) a
uniprocessor system and (2) that higher priority ISRs never call queue API
functions, then masking interrupts ensures *strong isolation*, meaning critical
regions protected by interrupt masking/enabling are isolated from other
critical regions and from code outside of critical regions. */
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*& result == K;@*/
{
UBaseType_t uxReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
uxReturn = pxQueue->uxMessagesWaiting;
return uxReturn;
}
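/* Hypothetical caller sketch (not part of the proof suite): any context that
holds the full queue predicate, e.g. an ISR that has masked interrupts, can
call the function above and learn the exact message count. */
#if 0 /*< illustration only, neither compiled nor verified */
void vExampleCountFromISR( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
{
    UBaseType_t uxCount = uxQueueMessagesWaitingFromISR( xQueue );
    /*@assert uxCount == K;@*/
}
#endif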
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
UBaseType_t uxReturn;
configASSERT( xQueue );
taskENTER_CRITICAL();
{
/*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
/*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */

@ -0,0 +1,48 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false;@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr);@*/
{
UBaseType_t uxReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
taskENTER_CRITICAL();
/*@assert queue(xQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
/*@assert uxReturn == N - K;@*/
}
/*@close queue(xQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */

@ -0,0 +1,80 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#define configSUPPORT_DYNAMIC_ALLOCATION 1
#define configSUPPORT_STATIC_ALLOCATION 0
void vQueueDelete( QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs) &*&
queuelists(xQueue) &*&
xQueue->irqMask |-> _ &*&
xQueue->schedulerSuspend |-> _ &*&
xQueue->locked |-> _;@*/
/*@ensures true;@*/
{
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
traceQUEUE_DELETE( pxQueue );
#if ( configQUEUE_REGISTRY_SIZE > 0 )
{
vQueueUnregisterQueue( pxQueue );
}
#endif
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The queue can only have been allocated dynamically - free it
again. */
vPortFree( pxQueue );
#ifdef VERIFAST /*< leak ghost state on deletion */
/*@leak buffer(_, _, _, _);@*/
/*@leak malloc_block(_, _);@*/
#endif
}
#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The queue could have been allocated statically or dynamically, so
check before attempting to free the memory. */
if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxQueue );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#else
{
/* The queue must have been statically allocated, so is not going to be
deleted. Avoid compiler warnings about the unused parameter. */
( void ) pxQueue;
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}

@ -0,0 +1,278 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, ?x) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x);@*/
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/*lint -save -e904 This function relaxes the coding standard somewhat to
allow return statements within the function itself. This is done in the
interest of execution time efficiency. */
for( ;; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvItemToQueue, M, x) &*&
u_integer(&xTicksToWait, _) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1)) &*&
xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
{
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/* Is there room on the queue now? The running task must be the
highest priority task wanting to access the queue. If the head item
in the queue is to be overwritten then it does not matter if the
queue is full. */
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
traceQUEUE_SEND( pxQueue );
/* VeriFast: we do not verify this configuration option */
#if ( configUSE_QUEUE_SETS == 1 )
{
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
if( pxQueue->pxQueueSetContainer != NULL )
{
if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
{
/* Do not notify the queue set as an existing item
was overwritten in the queue so the number of items
in the queue has not changed. */
mtCOVERAGE_TEST_MARKER();
}
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* If there was a task waiting for data to arrive on the
queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
our own so yield immediately. Yes it is ok to
do this from within the critical section - the
kernel takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
executed if the task was holding multiple mutexes
and the mutexes were given back in an order that is
different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* If there was a task waiting for data to arrive on the
queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
our own so yield immediately. Yes it is ok to do
this from within the critical section - the kernel
takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
executed if the task was holding multiple mutexes and
the mutexes were given back in an order that is
different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
/*@
if (xCopyPosition == queueSEND_TO_BACK) {
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
} else if (xCopyPosition == queueSEND_TO_FRONT) {
close queue(pxQueue, Storage, N, M, W, (R == 0 ? (N-1) : (R-1)), (K+1), is_locked, cons(x, abs));
} else if (xCopyPosition == queueOVERWRITE) {
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
}
@*/
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* The queue was full and no block time is specified (or
the block time has expired) so leave now. */
taskEXIT_CRITICAL();
/* Return to the original privilege level before exiting
the function. */
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was full and a block time was specified so
configure the timeout structure. */
vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */
/*@close exists(pxQueue);@*/
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
if( prvIsQueueFull( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
/*@open queue_locked_invariant(xQueue)();@*/
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can affect the
event list. It is possible that interrupts occurring now
remove this task from the event list again - but as the
scheduler is suspended the task will go onto the pending
ready list instead of the actual ready list. */
/*@close queue_locked_invariant(xQueue)();@*/
prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
ready list into the ready list - so it is feasible that this
task is already in a ready list before it yields - in which
case the yield will not cause a context switch unless there
is also a higher priority task in the pending ready list. */
/*@close exists(pxQueue);@*/
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
} /*lint -restore */
}

@ -0,0 +1,224 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
/*@requires
[1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvItemToQueue, M, ?x) &*&
integer(pxHigherPriorityTaskWoken, _) &*&
(xCopyPosition == queueSEND_TO_BACK || xCopyPosition == queueSEND_TO_FRONT || (xCopyPosition == queueOVERWRITE && N == 1));@*/
/*@ensures
[1/2]queuehandle(xQueue, N, M, is_isr) &*&
chars(pvItemToQueue, M, x) &*&
integer(pxHigherPriorityTaskWoken, _);@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
/* Similar to xQueueGenericSend, except without blocking if there is no room
in the queue. Also don't directly wake a task that was blocked on a queue
read, instead return a flag to say whether a context switch is required or
not (i.e. has a task with a higher priority than us been woken by this
post). */
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
const int8_t cTxLock = pxQueue->cTxLock;
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
traceQUEUE_SEND_FROM_ISR( pxQueue );
/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
semaphore or mutex. That means prvCopyDataToQueue() cannot result
in a task disinheriting a priority and prvCopyDataToQueue() can be
called here even though the disinherit function does not check if
the scheduler is suspended before accessing the ready lists. */
#ifdef VERIFAST /*< void cast of unused return value */
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#else
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#endif
/*@open queue(pxQueue, _, N, M, _, _, _, _, _);@*/
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( cTxLock == queueUNLOCKED )
{
/* VeriFast: we do not verify this configuration option */
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
{
/* Do not notify the queue set as an existing item
was overwritten in the queue so the number of items
in the queue has not changed. */
mtCOVERAGE_TEST_MARKER();
}
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* Not used in this path. */
#ifndef VERIFAST /*< void cast of unused var */
( void ) uxPreviousMessagesWaiting;
#endif
}
#endif /* configUSE_QUEUE_SETS */
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
configASSERT( cTxLock != queueINT8_MAX);
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
}
xReturn = pdPASS;
/*@
if (xCopyPosition == queueSEND_TO_BACK) {
close queue(pxQueue, Storage, N, M, (W+1)%N, R, (K+1), is_locked, append(abs, singleton(x)));
} else if (xCopyPosition == queueSEND_TO_FRONT) {
if (R == 0) {
close queue(pxQueue, Storage, N, M, W, (N-1), (K+1), is_locked, cons(x, abs));
} else {
close queue(pxQueue, Storage, N, M, W, (R-1), (K+1), is_locked, cons(x, abs));
}
} else if (xCopyPosition == queueOVERWRITE) {
close queue(pxQueue, Storage, N, M, W, R, 1, is_locked, singleton(x));
}
@*/
}
else
{
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}

@ -0,0 +1,48 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
result == ((K == 0) ? pdTRUE : pdFALSE);@*/
{
BaseType_t xReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

@ -0,0 +1,48 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/*@requires queue(xQueue, ?Storage, ?N, ?M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
/*@ensures queue(xQueue, Storage, N, M, W, R, K, is_locked, abs) &*&
result == ((K == N) ? pdTRUE : pdFALSE);@*/
{
BaseType_t xReturn;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
#endif
configASSERT( pxQueue );
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{
xReturn = pdTRUE;
}
else
{
xReturn = pdFALSE;
}
return xReturn;
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

@ -0,0 +1,206 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
int8_t *pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
/* The buffer into which data is received can only be NULL if the data size
is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/*lint -save -e904 This function relaxes the coding standard somewhat to
allow return statements within the function itself. This is done in the
interest of execution time efficiency. */
for( ;; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, x) &*&
u_integer(&xTicksToWait, _) &*&
xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Is there data in the queue now? To be running the calling task
must be the highest priority task wanting to access the queue. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Remember the read position so it can be reset after the data
is read from the queue as this function is only peeking the
data, not removing it. */
pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataFromQueue( pxQueue, pvBuffer );
traceQUEUE_PEEK( pxQueue );
/* The data is not being removed, so reset the read pointer. */
pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
/* The data is being left in the queue, so see if there are
any other tasks waiting for the data. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority than this task. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/* The queue was empty and no block time is specified (or
the block time has expired) so leave now. */
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was empty and a block time was specified so
configure the timeout structure ready to enter the blocked
state. */
vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */
/*@close exists<QueueHandle_t>(pxQueue);@*/
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Timeout has not expired yet, check to see if there is data in the
queue now, and if not enter the Blocked state to wait for data. */
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
/*@open queue_locked_invariant(xQueue)();@*/
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
/*@close queue_locked_invariant(xQueue)();@*/
prvUnlockQueue( pxQueue );
/*@close exists<QueueHandle_t>(pxQueue);@*/
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* There is data in the queue now, so don't enter the blocked
state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
{
/* The timeout has expired. If there is still no data in the queue,
exit; otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists<QueueHandle_t>(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
} /*lint -restore */
}

@ -0,0 +1,89 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
int8_t *pcOriginalReadPosition;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
traceQUEUE_PEEK_FROM_ISR( pxQueue );
/* Remember the read position so it can be reset as nothing is
actually being removed from the queue. */
pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataFromQueue( pxQueue, pvBuffer );
pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
xReturn = pdPASS;
}
else
{
xReturn = pdFAIL;
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}

@ -0,0 +1,204 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, ?x);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x));@*/
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
/* Check the pointer is not NULL. */
configASSERT( ( pxQueue ) );
/* The buffer into which data is received can only be NULL if the data size
is zero (so no data is copied into the buffer). */
configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
/* Cannot block if the scheduler is suspended. */
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
#endif
/*lint -save -e904 This function relaxes the coding standard somewhat to
allow return statements within the function itself. This is done in the
interest of execution time efficiency. */
for( ;; )
/*@invariant [1/2]queuehandle(xQueue, N, M, is_isr) &*&
[1/2]queuesuspend(xQueue) &*&
chars(pvBuffer, M, x) &*&
u_integer(&xTicksToWait, _) &*&
xTIME_OUT(&xTimeOut);@*/
{
taskENTER_CRITICAL();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Is there data in the queue now? To be running the calling task
must be the highest priority task wanting to access the queue. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* Data available, remove one item. */
prvCopyDataFromQueue( pxQueue, pvBuffer );
/*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
traceQUEUE_RECEIVE( pxQueue );
pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
/*@assert
pxQueue->pcHead |-> ?buffer &*&
buffer(buffer, N, M, ?contents);@*/
/*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
/* There is now space in the queue, were any tasks waiting to
post to the queue? If so, unblock the highest priority waiting
task. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
/*@assert chars(pvBuffer, M, head(abs));@*/
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
/* The queue was empty and no block time is specified (or
the block time has expired) so leave now. */
taskEXIT_CRITICAL();
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was empty and a block time was specified so
configure the timeout structure. */
vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
now the critical section has been exited. */
/*@close exists(pxQueue);@*/
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* The timeout has not expired. If the queue is still empty place
the task on the list of tasks waiting to receive from the queue. */
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
/*@open queue_locked_invariant(xQueue)();@*/
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
/*@close queue_locked_invariant(xQueue)();@*/
prvUnlockQueue( pxQueue );
/*@close exists(pxQueue);@*/
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* The queue contains data again. Loop back to try and read the
data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
}
}
else
{
/* Timed out. If there is no data in the queue exit, otherwise loop
back and attempt to read the data. */
prvUnlockQueue( pxQueue );
#ifdef VERIFAST /*< void cast of unused return value */
/*@close exists(pxQueue);@*/
xTaskResumeAll();
#else
( void ) xTaskResumeAll();
#endif
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
} /*lint -restore */
}

@ -0,0 +1,134 @@
/*
* FreeRTOS VeriFast Proofs
* Copyright (C) Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "proof/queue.h"
#include "proof/queuecontracts.h"
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == true &*&
chars(pvBuffer, M, ?x) &*&
pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _);@*/
/*@ensures [1/2]queuehandle(xQueue, N, M, is_isr) &*&
(result == pdPASS ? chars(pvBuffer, M, _) : chars(pvBuffer, M, x)) &*&
(pxHigherPriorityTaskWoken == NULL ? true : integer(pxHigherPriorityTaskWoken, _));@*/
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
#ifdef VERIFAST /*< const pointer declaration */
Queue_t * pxQueue = xQueue;
#else
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
#endif
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
above the maximum system call priority are kept permanently enabled, even
when the RTOS kernel is in a critical section, but cannot make any calls to
FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
failure if a FreeRTOS API function is called from an interrupt that has been
assigned a priority above the configured maximum system call priority.
Only FreeRTOS functions that end in FromISR can be called from interrupts
that have been assigned a priority at or (logically) below the maximum
system call interrupt priority. FreeRTOS maintains a separate interrupt
safe API to ensure interrupt entry is as fast and as simple as possible.
More information (albeit Cortex-M specific) is provided on the following
link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
/*@assert queue(pxQueue, ?Storage, N, M, ?W, ?R, ?K, ?is_locked, ?abs);@*/
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* Cannot block in an ISR, so check there is data available. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
const int8_t cRxLock = pxQueue->cRxLock;
traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
prvCopyDataFromQueue( pxQueue, pvBuffer );
/*@open queue_after_prvCopyDataFromQueue(pxQueue, Storage, N, M, W, (R+1)%N, K, is_locked, abs);@*/
pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
/*@assert buffer(Storage, N, M, ?contents);@*/
/*@deq_lemma(K, (R+1)%N, contents, abs, head(abs));@*/
/* If the queue is locked the event list will not be modified.
Instead update the lock count so the task that unlocks the queue
will know that an ISR has removed data while the queue was
locked. */
if( cRxLock == queueUNLOCKED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
/* The task waiting has a higher priority than us so
force a context switch. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was removed while it was locked. */
configASSERT( cRxLock != queueINT8_MAX );
pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
}
/*@close queue(pxQueue, Storage, N, M, W, (R+1)%N, K-1, is_locked, tail(abs));@*/
/*@assert chars(pvBuffer, M, head(abs));@*/
xReturn = pdPASS;
}
else
{
/*@close queue(pxQueue, Storage, N, M, W, R, K, is_locked, abs);@*/
xReturn = pdFAIL;
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
}

@ -0,0 +1,3 @@
#!/bin/bash -eu
# Re-run the proofs with VeriFast's -stats flag (coverage checking disabled)
# and report the unique "overhead" lines from the output.
NO_COVERAGE=1 EXTRA_VERIFAST_ARGS=-stats make list queue | grep overhead: | sort | uniq

@ -0,0 +1,20 @@
# Generate callgraph
## Requirements
- python3
- pycparser
- graphviz/dot
- [inconsolata](https://fonts.google.com/specimen/Inconsolata)
## Instructions
```
cd scripts
git clone https://github.com/eliben/pycparser.git #< you need this for pycparser's libc headers even if pycparser is installed
mkdir fake_include
touch fake_include/threading.h
gcc -E -I pycparser/utils/fake_libc_include/ -I ../include/ -I fake_include/ ../queue/*.c > out.pp
./callgraph.py > out.dot
dot -Nfontname=inconsolata -Tpng -o callgraph.png out.dot
```
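As a rough sanity check, the generated `out.dot` should look something like the
excerpt below (an illustrative sketch, not actual output): proven functions such
as `xQueueReceive` and modeled functions such as `vTaskSuspendAll` get different
fill colours, and edges run from callee to caller.
```
digraph G {
  rankdir=LR;
  node [style = filled, colorscheme = set13;];
  xQueueReceive [fillcolor = 3];    /* proven */
  vTaskSuspendAll [fillcolor = 2];  /* modeled */
  vTaskSuspendAll -> xQueueReceive;
}
```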

@ -0,0 +1,83 @@
#!/usr/bin/env python3
from __future__ import print_function
from pycparser import c_parser, c_ast, parse_file
import sys

# Callees and callers excluded from the generated graph
ignore_callee = set(['mutex_acquire', 'mutex_release'])
ignore_caller = set(['caller_reinstates_queue_predicate'])

# Queue functions with VeriFast proofs
proven = [
    'prvCopyDataFromQueue',
    'prvCopyDataToQueue',
    'prvInitialiseNewQueue',
    'prvIsQueueEmpty',
    'prvIsQueueFull',
    'prvLockQueue',
    'prvUnlockQueue',
    'uxQueueMessagesWaiting',
    'uxQueueSpacesAvailable',
    'vQueueDelete',
    'xQueueGenericCreate',
    'xQueueGenericReset',
    'xQueueGenericSend',
    'xQueueGenericSendFromISR',
    'xQueueIsQueueEmptyFromISR',
    'xQueueIsQueueFullFromISR',
    'xQueuePeek',
    'xQueuePeekFromISR',
    'xQueueReceive',
    'xQueueReceiveFromISR',
]

# Functions that the proofs model rather than verify
modeled = [
    'setInterruptMask',
    'clearInterruptMask',
    'setInterruptMaskFromISR',
    'clearInterruptMaskFromISR',
    'vTaskSuspendAll',
    'xTaskResumeAll',
]

# Set of (callee, caller) edges discovered in the preprocessed source
CALLMAP = set()

class FuncCallVisitor(c_ast.NodeVisitor):
    def __init__(self, caller):
        self.caller = caller

    def visit_FuncCall(self, node):
        callee = node.name.name
        if callee not in ignore_callee:
            CALLMAP.add((callee, self.caller))

class FuncDefVisitor(c_ast.NodeVisitor):
    def visit_FuncDef(self, node):
        caller = node.decl.name
        if caller in ignore_caller:
            return
        # Strip the 'wrapper_' prefix so wrapped functions are reported
        # under their original name.
        if caller.startswith('wrapper_'):
            caller = caller[8:]
        v = FuncCallVisitor(caller)
        v.visit(node)

def show_func_calls(filename):
    ast = parse_file(filename, use_cpp=False)
    v = FuncDefVisitor()
    v.visit(ast)

if __name__ == "__main__":
    # out.pp is the preprocessed source produced by gcc -E (see README.md)
    filename = 'out.pp'
    show_func_calls(filename)
    # Emit a Graphviz digraph; edges point from callee to caller
    print('digraph G {')
    print(' rankdir=LR;')
    print(' node [style = filled, colorscheme = set13;];')
    for f in proven:
        print(' %s [fillcolor = 3];' % f)
    for f in modeled:
        print(' %s [fillcolor = 2];' % f)
    for (callee, caller) in CALLMAP:
        print(' %s -> %s;' % (callee, caller))
    print('}')

@ -0,0 +1,40 @@
# Generate diffs between FreeRTOS source and proofs
## Requirements
- python3
- ctags 5.8
- diff 3.4+
- [diff2html](https://diff2html.xyz/)
## Instructions
The following will extract per-function files from the original FreeRTOS source
implementation and the proof directory.
```
cd scripts
./generate_diff_files.sh
# will extract to ./FreeRTOS-Kernel/generated and ./queue/generated
```
Then use `diff` for a side-by-side comparison. Note that the `--color=always`
flag needs v3.4+:
```
diff --color=always --width=$COLUMNS --suppress-common-lines --side-by-side FreeRTOS-Kernel/generated queue/generated | less -r
```
Or generate a html report using `diff2html`:
```
diff -u FreeRTOS-Kernel/generated queue/generated | diff2html -i stdin
```
The expectation is that the proofs make only minimal changes to the original
source implementation, in the following forms (see the sketch after this list):
- VeriFast annotations `/*@...@*/` and `//@...`
- Additional comments explaining the proof `/*...*/`
- Flagged changes within `#if[n]def VERIFAST`
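For illustration, here is a minimal sketch of what these changes look like,
drawn from the `xQueueReceive` proof in this tree (body elided):
```
BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
/*@requires [1/2]queuehandle(xQueue, ?N, ?M, ?is_isr) &*& is_isr == false &*&
    [1/2]queuesuspend(xQueue) &*&
    chars(pvBuffer, M, ?x);@*/                        /* VeriFast annotation */
{
    /* ...body elided... */
    #ifdef VERIFAST /*< const pointer declaration */  /* flagged change */
        Queue_t * pxQueue = xQueue;
    #else
        Queue_t * const pxQueue = xQueue;
    #endif
    /* ... */
}
```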

@ -0,0 +1,47 @@
#!/bin/bash -eu
# Extract per-function files from the upstream FreeRTOS kernel queue.c and from
# the proof sources so the two can be diffed (see README.md).
FUNCS=(
prvCopyDataFromQueue
prvCopyDataToQueue
prvInitialiseNewQueue
prvIsQueueEmpty
prvIsQueueFull
prvUnlockQueue
uxQueueMessagesWaiting
uxQueueSpacesAvailable
vQueueDelete
xQueueGenericCreate
xQueueGenericReset
xQueueGenericSend
xQueueGenericSendFromISR
xQueueIsQueueEmptyFromISR
xQueueIsQueueFullFromISR
xQueuePeek
xQueuePeekFromISR
xQueueReceive
xQueueReceiveFromISR
)
if [ ! -d "FreeRTOS-Kernel" ]; then
git clone https://github.com/FreeRTOS/FreeRTOS-Kernel.git
fi
pushd FreeRTOS-Kernel > /dev/null
rm -rf tags generated
ctags --excmd=number queue.c
mkdir generated
for f in ${FUNCS[@]}; do
../extract.py tags $f > generated/$f.c
done
popd > /dev/null
echo "created: FreeRTOS-Kernel/generated"
ln -fs ../queue .
pushd queue > /dev/null
rm -rf tags generated
ctags --excmd=number *.c
mkdir generated
for f in ${FUNCS[@]}; do
../scripts/extract.py tags $f > generated/$f.c
done
popd > /dev/null
echo "created: queue/generated"