import math
import time
from IPython.display import HTML
from itables import to_html_datatable
from joblib import Parallel, delayed, cpu_count
import numpy as np
import pandas as pd
import plotly.express as px
import scipy.stats as st
import simpy
from sim_tools.distributions import Exponential
Mathematical proof of correctness
Learning objectives:
- Understand why and how to verify a model using mathematical proof of correctness.
Pre-reading:
This page implements the mathematical proof of correctness described on the verification and validation page. It does so within a test (see the tests page).
The test is run on the model from the parallel processing page (likewise, also used on the scenario and sensitivity analysis page and tests pages).
Entity generation → Entity processing → Initialisation bias → Performance measures → Replications → Parallel processing → Mathematical proof of correctness
Required packages:
These should be available from environment setup in the “Test yourself” section of Environments.
library(data.table)
library(dplyr)
library(future)
library(future.apply)
library(ggplot2)
library(knitr)
library(plotly)
library(queueing)
library(simmer)
library(tidyr)
What is mathematical proof of correctness?
One of the suggested methods for model verification is mathematical proof of correctness. This involves showing that the simulation results align with those determined by a suitable mathematical model.
A mathematical model is a set of equations or formulas that describe how systems behave. Over time, researchers have studied simple systems (like people waiting in a queue) and discovered that, when rules are straightforward, their behaviour can be described exactly using math.
Real-world systems are often too complex for neat equations (e.g., variable arrivals, different service rules or other complex features). In these cases, discrete-event simulation allows us to better mimic the system. However, if your simulation is simple enough - or if you start with a basic model to verify your code (and later build in complexity) - you can use mathematical proof of correctness.
The small example we’ve been building throughout this book is an instance of an M/M/s queue model (as is the nurse visit simulation example). For a reminder on what an M/M/s queue model is, see the section “Find out more about M/M/s models” on the example conceptual models page. In the following sections, we’ll show how to verify our small example by comparing it to a mathematical model.
Simulation code
The model used is the same as that on the parallel processing page - as also used on the scenario and sensitivity analysis and tests pages.
Mathematical M/M/s queue model
The class implementing a mathematical M/M/s queue model is explained line-by-line below.
class MMSQueue:
    """
    Analytical M/M/S queue formulas.

    Parameters
    ----------
    arrival_rate : float
        Customer arrival rate (λ).
    service_rate : float
        Service rate per server (μ).
    num_servers : int
        Number of servers (s).

    Attributes
    ----------
    rho : float
        Utilisation (λ / (sμ)).
    lambda_over_mu : float
        Arrival/service rate ratio (λ / μ).
    metrics : dict
        Calculated performance metrics.
    """
    def __init__(self, arrival_rate, service_rate, num_servers):
        """
        Initialise the M/M/S queue.

        Raises
        ------
        ValueError
            If parameters are invalid or system is unstable.
        """
        if arrival_rate <= 0:
            raise ValueError("Arrival rate must be positive")
        if service_rate <= 0:
            raise ValueError("Service rate must be positive")
        if num_servers < 1:
            raise ValueError("Number of servers must be at least 1")

        self.arrival_rate = arrival_rate
        self.service_rate = service_rate
        self.num_servers = num_servers

        # Calculate utilisation
        self.rho = self.get_traffic_intensity()

        # Check system stability (steady-state formulas require ρ < 1)
        if self.rho >= 1:
            raise ValueError(
                f"System is unstable: ρ = {self.rho:.4f} >= 1. "
                f"Need λ < s*μ ({arrival_rate} < {num_servers * service_rate})"
            )

        # Calculate λ/μ (average customers in service if infinite servers)
        self.lambda_over_mu = self.arrival_rate / self.service_rate

        # Calculate performance metrics using Little's Law
        self.metrics = self.calculate_metrics()

    def get_traffic_intensity(self):
        """
        Calculate the traffic intensity (server utilisation).

        Returns
        -------
        float
            Traffic intensity ρ = λ/(s*μ).
        """
        return self.arrival_rate / (self.num_servers * self.service_rate)

    def calculate_metrics(self):
        """
        Calculate all performance metrics for the queue.

        Returns
        -------
        dict[str, float]
            Dictionary containing performance metrics (rho, L_q, L_s,
            W_s, W_q).
        """
        metrics = {}
        metrics["rho"] = self.rho
        metrics["L_q"] = self.get_mean_queue_length()
        # L_s = L_q + expected number in service (λ/μ)
        metrics["L_s"] = metrics["L_q"] + (
            self.arrival_rate / self.service_rate
        )
        # Little's Law: L_s = λ * W_s, rearranged to W_s = L_s / λ
        metrics["W_s"] = metrics["L_s"] / self.arrival_rate
        # Time in queue = time in system minus mean service time (1/μ)
        metrics["W_q"] = metrics["W_s"] - (1 / self.service_rate)
        return metrics

    def get_mean_queue_length(self):
        """
        Calculate the expected number of customers waiting in queue (L_q).

        Uses the formula:
        L_q = P₀ * (λ/μ)^s * ρ / (s! * (1-ρ)²)

        Returns
        -------
        float
            Expected queue length.
        """
        p0 = self.prob_system_empty()
        lq = (p0 * (self.lambda_over_mu**self.num_servers) * self.rho) / (
            math.factorial(self.num_servers) * (1 - self.rho) ** 2
        )
        return lq

    def prob_system_empty(self):
        """
        Calculate the probability that the system is empty (P₀).

        Uses the formula:
        P₀ = [Σ(n=0 to s-1) (λ/μ)^n/n! + (λ/μ)^s/(s!(1-ρ))]^(-1)

        Returns
        -------
        float
            Probability that system is empty.
        """
        # Sum for n = 0 to s-1
        sum_part = sum(
            (self.lambda_over_mu**n) / math.factorial(n)
            for n in range(self.num_servers)
        )
        # Term for n >= s
        server_term = (self.lambda_over_mu**self.num_servers) / (
            math.factorial(self.num_servers) * (1 - self.rho)
        )
        return 1 / (sum_part + server_term)
Explaining MMSQueue
class MMSQueue:
"""
Analytical M/M/S queue formulas.
Parameters
----------
arrival_rate : float
Customer arrival rate (λ).
service_rate : float
Service rate per server (μ).
num_servers : int
Number of servers (s).
Attributes
----------
rho : float
Utilisation (λ / (sμ)).
lambda_over_mu : float
Arrival/service rate ratio (λ / μ).
metrics : dict
Calculated performance metrics.
"""
def __init__(self, arrival_rate, service_rate, num_servers):
"""
Initialise the M/M/S queue.
Raises
------
ValueError
If parameters are invalid or system is unstable.
"""
if arrival_rate <= 0:
raise ValueError("Arrival rate must be positive")
if service_rate <= 0:
raise ValueError("Service rate must be positive")
if num_servers < 1:
raise ValueError("Number of servers must be at least 1")
self.arrival_rate = arrival_rate
self.service_rate = service_rate
self.num_servers = num_servers
MMSQueue
requires three inputs:
arrival_rate
(λ) - average number of customers (patients) per time unit (minute).service_rate
(μ) - average number of customers (patients) a single server (doctor) can serve per time unit (minute).num_servers
(s) - number of servers (doctors) in the system.
It checks the inputs are valid (no negative or zero rates, at least one server) and stores them as attributes.
# Calculate utilisation
self.rho = self.get_traffic_intensity()
# Check system stability
if self.rho >= 1:
raise ValueError(
f"System is unstable: ρ = {self.rho:.4f} >= 1. "
f"Need λ < s*μ ({arrival_rate} < {num_servers * service_rate})"
)
It then calculates utilisation (ρ) by calling the get_traffic_intensity()
method. Utilisation is a measure of how busy the servers are - and if it is close to 1, it means the servers are almost always busy.
If the calculated utilisation is greater than or equal to 1, it means that customers arrive at least as fast as the servers can serve them. In this situation, the queue would keep growing indefinitely. There is no steady state, and the system is unstable. When this happens, the model stops and raises an error instead of producing meaningless results.
Note: This behaviour is different from a discrete-event simulation model, where utilisation is capped at 1 (since servers cannot be more than fully busy). However, you can still have a queue that grows indefinitely - you would just notice it by looking at other metrics like queue length or waiting time, and see them continuing to increase over time.
# Calculate λ/μ (average customers in service if infinite servers)
self.lambda_over_mu = self.arrival_rate / self.service_rate
# Calculate performance metrics using Little's Law
self.metrics = self.calculate_metrics()
Next, we calculate lambda_over_mu
, which represents the average number of customers being served if there were infinite servers. It’s computed here to avoid code repetition, as it is used repeatedly in later formulas.
After that, the model calls calculate_metrics()
.
def get_traffic_intensity(self):
"""
Calculate the traffic intensity (server utilisation).
Returns
-------
float
Traffic intensity ρ = λ/(s*μ).
"""
return self.arrival_rate / (self.num_servers * self.service_rate)
The method get_traffic_intensity()
(as called in __init__())
computes utilisation by finding the fraction of total service capacity being used.
def calculate_metrics(self):
    """
    Calculate all performance metrics for the queue.

    Returns
    -------
    dict[str, float]
        Dictionary containing performance metrics (rho, L_q, L_s,
        W_s, W_q).
    """
    metrics = {}
    metrics["rho"] = self.rho
    metrics["L_q"] = self.get_mean_queue_length()
    # L_s = L_q + expected number in service (λ/μ)
    metrics["L_s"] = metrics["L_q"] + (
        self.arrival_rate / self.service_rate
    )
    # Little's Law: L_s = λ * W_s, rearranged to W_s = L_s / λ
    metrics["W_s"] = metrics["L_s"] / self.arrival_rate
    # Time in queue = time in system minus mean service time (1/μ)
    metrics["W_q"] = metrics["W_s"] - (1 / self.service_rate)
    return metrics
The calculate_metrics()
method uses several analytical formulas to compute the main queue performance measures.
Symbol | Meaning | Formula |
---|---|---|
ρ | Utilisation | λ / (sμ) |
Lq | Average queue length | Computed using get_mean_queue_length() |
Ls | Average number in system | Lq + (λ/μ) |
Ws | Average time in system | Ls / λ |
Wq | Average waiting time in queue | Ws − (1/μ) |
All results are returned in a dictionary.
Fun fact: The formula for average time in system makes use of Little’s Law, which states that Ls = λ × Ws. This is rearranged to calculate time in system: Ws = Ls / λ.
def get_mean_queue_length(self):
    """
    Calculate the expected number of customers waiting in queue (L_q).

    Uses the formula:
    L_q = P₀ * (λ/μ)^s * ρ / (s! * (1-ρ)²)

    Returns
    -------
    float
        Expected queue length.
    """
    # P₀ = probability that the system is completely empty
    p0 = self.prob_system_empty()
    lq = (p0 * (self.lambda_over_mu**self.num_servers) * self.rho) / (
        math.factorial(self.num_servers) * (1 - self.rho) ** 2
    )
    return lq
As the calculation for mean queue length is a little more complicated, we have a separate method to calculate it.
First, we compute P0 which is the probability that the system is completely empty (using prob_system_empty()
). We then plug this into the formula to calculate Lq (the mean queue length).
def prob_system_empty(self):
    """
    Calculate the probability that the system is empty (P₀).

    Uses the formula:
    P₀ = [Σ(n=0 to s-1) (λ/μ)^n/n! + (λ/μ)^s/(s!(1-ρ))]^(-1)

    Returns
    -------
    float
        Probability that system is empty.
    """
    # Sum for n = 0 to s-1
    sum_part = sum(
        (self.lambda_over_mu**n) / math.factorial(n)
        for n in range(self.num_servers)
    )
    # Term for n >= s
    server_term = (self.lambda_over_mu**self.num_servers) / (
        math.factorial(self.num_servers) * (1 - self.rho)
    )
    return 1 / (sum_part + server_term)

# NOTE(review): an unrelated R code fragment (a nurse-model version of
# run_simulation_model) had been spliced into the middle of this method
# during page extraction; it has been removed here, as it duplicates the
# R example presented later on this page.
This method calculates P0, which is the probability that the system is completely empty - i.e., no customers are waiting and all servers are idle.
In R, there is a package queueing which makes this task super easy!
To create a theoretical M/M/s queue model, we first need to calculate lambda and mu based on our inter-arrival time and service times:
# Example parameters
interarrival_time <- 5
service_length <- 5
servers <- 2

# Calculate lambda (arrival rate) and mu (service rate per server)
lambda <- 1L / interarrival_time
mu <- 1L / service_length
We can then use queueing::NewInput.MMC()
to set-up an M/M/s (M/M/c) model. This function takes the arrival rate (lambda
), service rate (mu
) and the number of servers (c
). There are two other parameters n
and method
, but we just specify the defaults here (0) (see documentation for more details).
# Set up the M/M/s (M/M/c) model inputs; n and method use the defaults (0)
math_model <- queueing::NewInput.MMC(
  lambda = lambda, mu = mu, c = servers, n = 0L, method = 0L
)
math_model
$lambda
[1] 0.2
$mu
[1] 0.2
$c
[1] 2
$n
[1] 0
$method
[1] 0
attr(,"class")
[1] "i_MMC"
We then provide this model to queueing::QueueingModel()
, which builds the model and returns the relevant calculations.
# Build the model and return the relevant analytical calculations
math_results <- queueing::QueueingModel(math_model)
math_results
$Inputs
$lambda
[1] 0.2
$mu
[1] 0.2
$c
[1] 2
$n
[1] 0
$method
[1] 0
attr(,"class")
[1] "i_MMC"
$RO
[1] 0.5
$Lq
[1] 0.3333333
$VNq
[1] 0.8888889
$Wq
[1] 1.666667
$VTq
[1] 13.88889
$Throughput
[1] 0.2
$L
[1] 1.333333
$VN
[1] 2.222222
$W
[1] 6.666667
$VT
[1] 38.88889
$Wqq
[1] 5
$Lqq
[1] 2
$Pn
[1] 0.3333333
$Qn
[1] 0.3333333
$FW
function(t)
{
if (r == (x$c - 1))
{
res <- 1 - ( (1 + cErlang * x$mu * t) * exp(-x$mu * t) )
}
else
{
aux1 <- ( r - x$c + 1 - cErlang ) * exp(-x$mu * t)
aux2 <- cErlang * exp( (-1) * (1 - RO) * x$c * x$mu * t )
aux <- (aux1 + aux2)/( x$c - 1 - r )
res <- 1 + aux
}
res
}
<bytecode: 0x56413e164e68>
<environment: 0x56413e14b680>
$FWq
function(t)
{
1 - ( cErlang * exp( (-1) * (1 - RO) * x$c * x$mu * t ) )
}
<bytecode: 0x56413e173c30>
<environment: 0x56413e14b680>
attr(,"class")
[1] "o_MMC"
Function to run our simulation
This function simply runs our simulation model and returns the relevant metrics, renamed to use queueing theory notation.
We could have included this code as part of the test function below - but have just made it in a separate function to keep that test function a little simpler.
Our short illustrative parameters (30 minute warm-up, 40 minute data collection, 5 replications) give very unstable results, so we have increased them to obtain more reliable results for the comparison in this test.
def run_simulation_model(
    interarrival_time,
    consultation_time,
    number_of_doctors
):
    """
    Run simulation and return key performance indicators using standard
    queueing theory notation.

    The warm-up period should be sufficiently long to allow the system
    to reach steady-state before data collection begins.

    Parameters
    ----------
    interarrival_time : float
        Mean time between arrivals (minutes).
    consultation_time : float
        Mean length of doctor's consultation (minutes).
    number_of_doctors : int
        Number of doctors.

    Returns
    -------
    pandas.Series
        Mean W_q, W_s, rho and L_q across replications.
    """
    # Run simulation (long warm-up/collection + many runs for stable results)
    param = Parameters(
        interarrival_time=interarrival_time,
        consultation_time=consultation_time,
        number_of_doctors=number_of_doctors,
        warm_up_period=2000,
        data_collection_period=4000,
        number_of_runs=20,
        cores=1,
        verbose=False
    )
    runner = Runner(param)
    results = runner.run_reps()

    # Rename the columns using queueing theory notation
    mapping = {
        "mean_wait_time": "W_q",
        "mean_time_in_system": "W_s",
        "mean_utilisation_tw": "rho",
        "mean_queue_length": "L_q",
    }
    df = results["overall"].rename(columns=mapping)

    # Return relevant columns (the "mean" row, as a Series)
    return df[list(mapping.values())].T["mean"]
For example:
run_simulation_model(interarrival_time=5,
                     consultation_time=10,
                     number_of_doctors=3)
W_q 4.168970
W_s 14.077349
rho 0.657581
L_q 0.835643
Name: mean, dtype: float64
#' Run simulation and return key performance indicators using standard queueing
#' theory notation.
#'
#' The warm-up period should be sufficiently long to allow the system to reach
#' steady-state before data collection begins.
#'
#' @param interarrival_time Mean time between arrivals (minutes).
#' @param consultation_time Mean length of doctor's consultation (minutes).
#' @param number_of_doctors Number of doctors.
#'
#' @return A tibble with one row and columns RO, Lq, W and Wq.
run_simulation_model <- function(
  interarrival_time, consultation_time, number_of_doctors
) {
  # Run simulation (long warm-up/collection + many runs for stable results)
  param <- create_params(
    interarrival_time = interarrival_time,
    consultation_time = consultation_time,
    number_of_doctors = number_of_doctors,
    warm_up_period = 2000L,
    data_collection_period = 4000L,
    number_of_runs = 20L,
    verbose = FALSE,
    cores = 1L
  )
  run_results <- runner(param)[["run_results"]]

  # Get overall results, using queueing theory notation in column names
  results <- run_results |>
    summarise(
      RO = mean(.data[["utilisation_doctor"]]),
      Lq = mean(.data[["mean_queue_length_doctor"]]),
      W = mean(.data[["mean_time_in_system"]]),
      Wq = mean(.data[["mean_wait_time_doctor"]])
    )

  results
}
For example:
run_simulation_model(interarrival_time = 5L,
consultation_time = 10L,
number_of_doctors = 3L)
# A tibble: 1 × 4
RO Lq W Wq
<dbl> <dbl> <dbl> <dbl>
1 0.671 0.866 14.3 4.31
Test comparing simulation with mathematical model results
Our test function test_simulation_against_theory()
uses @pytest.mark.parametrize
to run several different test cases.
For each of these, it compares the results from the simulation model (from run_simulation_model()
) with the results from the mathematical model (MMSQueue
) and checks they are reasonably similar.
@pytest.mark.parametrize(
    "interarrival_time,consultation_time,number_of_doctors",
    [
        # Test case 1: Low utilisation (ρ = 0.15)
        (10, 3, 2),
        # Test case 2: Low-moderate utilisation (ρ ≈ 0.33)
        (6, 4, 2),
        # Test case 3: M/M/1 (ρ = 0.75)
        (4, 3, 1),
        # Test case 4: Multiple servers (ρ ≈ 0.30)
        (5.5, 5, 3),
        # Test case 5: Balanced system (ρ = 0.5)
        (8, 4, 1),
        # Test case 6: Many servers (ρ ≈ 0.63)
        (4, 10, 4),
        # Test case 7: Very low utilisation (ρ ≈ 0.011)
        (60, 10, 15),
    ],
)
def test_simulation_against_theory(
    interarrival_time,
    consultation_time,
    number_of_doctors
):
    """Test simulation results against theoretical M/M/S queue calculations."""
    # Create theoretical M/M/S queue model and get metrics
    lam = 1 / interarrival_time
    mu = 1 / consultation_time
    theory = MMSQueue(lam, mu, number_of_doctors).metrics

    # Run simulation
    sim = run_simulation_model(
        interarrival_time=interarrival_time,
        consultation_time=consultation_time,
        number_of_doctors=number_of_doctors
    )

    # Compare results with appropriate tolerance (round to 3dp + 15% tolerance)
    metrics = [
        ("rho", "Utilisation"),
        ("L_q", "Queue length"),
        ("W_q", "Wait time"),
        ("W_s", "System time")
    ]
    for key, label in metrics:
        sim_val = round(sim[key], 3)
        theory_val = round(theory[key], 3)
        assert sim_val == pytest.approx(theory_val, rel=0.15), (
            f"{label} mismatch: sim={sim_val}, theory={theory_val}"
        )
============================= test session starts ==============================
platform linux -- Python 3.11.9, pytest-8.4.1, pluggy-1.6.0
rootdir: /__w/des_rap_book/des_rap_book/pages/verification_validation
plugins: anyio-4.11.0, timeout-2.4.0
collected 7 items
tests_resources/test_mms.py ....... [100%]
============================== 7 passed in 2.48s ===============================
<ExitCode.OK: 0>
Our test function uses patrick
to run several different test cases.
For each of these, it compares the results from the simulation model (from run_simulation_model()
) with the results from the mathematical model from queueing
, and checks they are reasonably similar.
library(patrick)
library(testthat)
patrick::with_parameters_test_that(
  "simulation is consistent with theoretical MMs queue calculations.",
  {
    # Create theoretical M/M/s queue model
    lambda <- 1L / interarrival_time
    mu <- 1L / consultation_time
    i_mmc <- queueing::NewInput.MMC(
      lambda = lambda, mu = mu, c = number_of_doctors, n = 0L, method = 0L
    )
    theory <- queueing::QueueingModel(i_mmc)

    # Run simulation
    sim <- run_simulation_model(
      interarrival_time = interarrival_time,
      consultation_time = consultation_time,
      number_of_doctors = number_of_doctors
    )

    # Compare results with appropriate tolerance (round to 3dp + 15% tolerance)
    metrics <- list(
      c("RO", "Utilisation"),
      c("Lq", "Queue length"),
      c("W", "System time"),
      c("Wq", "Wait time")
    )
    for (metric in metrics) {
      key <- metric[1L]
      label <- metric[2L]

      sim_val <- round(sim[[key]], 3L)
      theory_val <- round(theory[[key]], 3L)

      expect_equal(
        sim_val,
        theory_val,
        tolerance = 0.15, # 15% relative tolerance
        info = sprintf(
          "%s mismatch: sim=%.3f, theory=%.3f", label, sim_val, theory_val
        )
      )
    }
  },
  patrick::cases(
    # Test case 1: Low utilisation (ρ = 0.15)
    list(interarrival_time = 10L,
         consultation_time = 3L,
         number_of_doctors = 2L),
    # Test case 2: Low-moderate utilisation (ρ ≈ 0.33)
    list(interarrival_time = 6L,
         consultation_time = 4L,
         number_of_doctors = 2L),
    # Test case 3: M/M/1 (ρ = 0.75)
    list(interarrival_time = 4L,
         consultation_time = 3L,
         number_of_doctors = 1L),
    # Test case 4: Multiple servers (ρ ≈ 0.30)
    list(interarrival_time = 5.5,
         consultation_time = 5L,
         number_of_doctors = 3L),
    # Test case 5: Balanced system (ρ = 0.5)
    list(interarrival_time = 8L,
         consultation_time = 4L,
         number_of_doctors = 1L),
    # Test case 6: Many servers (ρ ≈ 0.63)
    list(interarrival_time = 4L,
         consultation_time = 10L,
         number_of_doctors = 4L),
    # Test case 7: Very low utilisation (ρ ≈ 0.011)
    list(interarrival_time = 60L,
         consultation_time = 10L,
         number_of_doctors = 15L)
  )
)
══ Testing test_mms.R ══════════════════════════════════════════════════════════
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 2 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 4 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 5 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 6 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 8 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 9 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 10 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 11 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 12 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 13 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 14 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 15 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 16 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 17 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 18 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 19 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 20 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 21 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 22 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 23 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 24 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 25 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 26 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 27 ]
[ FAIL 0 | WARN 0 | SKIP 0 | PASS 28 ] Done!
Explore the example models
Nurse visit simulation
Click to visit pydesrap_mms repository
Key files | tests/test_mms.py |
Click to visit rdesrap_mms repository
Key files | tests/testthat/test-mms.R |
Stroke pathway simulation
Not relevant - model too complex.
Test yourself
Try adding this test to the model set-up using this book.