\documentclass[a4paper]{article} \title{A category of interfaces and
components\\\textbf{\Huge $\Rightarrow$ DRAFT $\Leftarrow$ }} \author{Peter G. Hancock}
\date{Updated Nov 8th: \\ {\small concatenated with notes on predicate
transformers.}}
%\usepackage{euler}
\usepackage{mathlig}
\usepackage{graphics}
\usepackage{color}
\usepackage{amssymb}
\usepackage{varioref}
\usepackage[active]{srcltx}
\input{rules}
% Stylistic things
% Redefine {itemize} as a plain {list} with an en-dash item label and
% tighter vertical spacing (no extra space above the list or between items).
\renewenvironment{itemize}%
{\begin{list}%
{--}%
{%\addtolength{\partopsep}{-1.9em}
\setlength{\parskip}{1ex}
\setlength{\itemsep}{0pt}
\setlength{\topsep}{0pt}}}
{\end{list}}
\raggedright
\setlength{\parskip}{1ex}
%% "English"
\newcommand{\adlib}{\mbox{\it ad lib}}
\newcommand{\ala}{\mbox{\it \`a la}}
\newcommand{\apropos}{\mbox{\it \`a propos}}
\newcommand{\eg}{\mbox{\it e.g.}}
\newcommand{\etc}{\mbox{\it etc.}}
\newcommand{\ie}{\mbox{\it i.e.}}
\newcommand{\naive}{na\"{\i}ve}
\newcommand{\primafacie}{\mbox{\it prima facie}}
\newcommand{\role}{\mbox{\it r\^{o}le}}
\newcommand{\roles}{\role{s}}
\newcommand{\versus}{\mbox{\it vs.}}
\newcommand{\via}{\mbox{\it via}}
\newcommand{\demonic}{diabolical}
\renewcommand{\demonic}{demonic}
%\newcommand{\REL}{\ensuremath{\mathit{Rel}}}
\newcommand{\ST}{\mbox{\textit{st}}}
\newcommand{\IS}{\mbox{\textit{is}}}
\newcommand{\TS}{\mbox{\textit{ts}}}
\newcommand{\PT}{\mbox{\textit{pt}}}
\newcommand{\REL}{\mbox{\textit{rel}}}
%% Slides
%\newcommand{\blackandwhite}[1]{\input{#1}}
%\newcommand{\ST}[1]{%
% \begin{center}%
% \textbf{ \color{blue} \Large #1}%
% \end{center}%
%}
%% Slide headings
%\newcommand{\HH}[1]{\ensuremath{\underline{\mbox{\small \textit{#1}}}}}
% Comments: ML/Pascal-style comment brackets (* ... *), and \CC{..} for a
% small inline comment in maths.
% Fix: the original bodies used \*, which is a discretionary multiplication
% sign valid only in math mode and errors inside \mbox; a literal * is the
% intended glyph.
\newcommand{\BC}{\mbox{(*}}
\newcommand{\EC}{\mbox{*)}}
\newcommand{\CC}[1]{\hspace{.4em}\ensuremath{\mbox{\small \BC{} \textit{#1} \EC{}}}}
% Maths
\newcommand{\V}[1]{\ensuremath{\mbox{\textit{#1}}}}
\newcommand{\C}[1]{\ensuremath{\mbox{\textsc{#1}}}}
\newcommand{\Set}{\ensuremath{\mbox{Set}}}
\newcommand{\Type}{\ensuremath{\mbox{Type}}}
\newcommand{\Pow}[1]{\ensuremath{\mathit{Pred(#1)}}}
\newcommand{\Fam}[1]{\ensuremath{\mathit{Fam(#1)}}}
\renewcommand{\Pow}[1]{\ensuremath{{\cal P}(#1)}}
\renewcommand{\Fam}[1]{\ensuremath{{\cal F}(#1)}}
\renewcommand{\Pow}[1]{\ensuremath{{\mathbb P}(#1)}}
\renewcommand{\Fam}[1]{\ensuremath{{\mathbb F}(#1)}}
\newcommand{\PowPow}[1]{\Pow{\Pow{#1}}}
\newcommand{\FamFam}[1]{\Fam{\Fam{#1}}}
% Quantifier notation. NOTE: these deliberately clobber the standard
% \sum and \prod symbols document-wide; ordinary summation/product signs
% are no longer available under their usual names after this point.
\renewcommand{\sum}[1]{\ensuremath{(\exists\,#1)\;\;}}
\renewcommand{\prod}[1]{\ensuremath{(\forall\,#1)\;\;}}
\newcommand{\lfp}[1]{\ensuremath{(\mu\,#1)\;\;}}
\newcommand{\SET}[2]{\ensuremath{\{\,#1\,|\,#2\,\}}}
\newcommand{\SING}[1]{\ensuremath{\{\,#1\,\}}}
%\newcommand{\PAIR}[1]{\ensuremath{\langle #1 \rangle}}
%\newcommand{\dual}{\ensuremath{\sim}}
\newcommand{\BLANK}{\ensuremath{\mbox{\small \_\,}}}
\newcommand{\iso}{\ensuremath{\cong}}
% RC
\renewcommand{\sup}[1]{\ensuremath{\sqcup}_{#1}}
\renewcommand{\inf}[1]{\ensuremath{\sqcap}_{#1}}
\newcommand{\chaos}{\mbox{chaos}} % function
\newcommand{\void}{\mbox{void}} % function
\newcommand{\emptyS}{\ensuremath{\{ \}}}
\newcommand{\fullS}{\ensuremath{\mbox{Triv}}}
\newcommand{\emptyR}{\ensuremath{\mbox{empty}}}
\newcommand{\fullR}{\ensuremath{\mbox{full}}}
%\newcommand{\meets}{\ensuremath{\mathrel{\mbox{)}\!\!\mbox{(}}}}
% \overlaps: inhabited-intersection relation, typeset with \between (amssymb).
% Fix: the \ensuremath argument was unbraced, so \ensuremath applied only to
% \mathrel and {\between} was left outside math mode.
\newcommand{\overlaps}{\ensuremath{\mathrel{\between}}}
%\newcommand{\overlaps}{\between}
\newcommand{\SEQ}{\mathop{;}}
\newcommand{\SKIP}{\textsc{skip}}
\newcommand{\eq}{\textsc{eq}}
\newcommand{\ABORT}{\textsc{abort}}
\newcommand{\MAGIC}{\textsc{magic}}
% Angels and demons
\newcommand{\angel}[1]{\ensuremath{\mbox{$#1$}{}^{\circ}}}
\newcommand{\demon}[1]{\ensuremath{\mbox{$#1$}{}^{\bullet}}}
\newcommand{\angelU}[1]{\ensuremath{\{ #1 \}}}
\renewcommand{\angelU}[1]{\ensuremath{\langle #1 \rangle}}
\newcommand{\demonU}[1]{\ensuremath{[\,{#1}\,]}}
\newcommand{\inv}[1]{\ensuremath{{#1}^{\sim}}}
\newcommand{\invPT}[1]{\ensuremath{{#1}^{\sim}}}
\renewcommand{\invPT}[1]{\ensuremath{{#1}{}^{{}{-}{}}}}
\newcommand{\reverse}{\inv}
% Partial functions
\newcommand{\dom}[1]{\ensuremath{\mbox{dom}(#1)}}
\newcommand{\rest}[2]{\ensuremath{#1 \;\mbox{restricted to}\;#2}}
\renewcommand{\rest}[2]{\ensuremath{#2 \mathop{\upharpoonleft}#1}}
%\renewcommand{\rest}[2]{\ensuremath{\mbox{grd} #2 -> #1}}
% repetition
% operators on homogeneous arrows
\newcommand{\opt}[1]{\ensuremath{{#1}^{0,1}}}
\renewcommand{\opt}[1]{\ensuremath{{#1}^{?}}}
\newcommand{\tc}[1]{\ensuremath{{#1}^{+}}}
\newcommand{\rtc}[1]{\ensuremath{{#1}^{\ast}}}
\newcommand{\ctr}[1]{\ensuremath{{#1}^{\infty}}}
\newcommand{\dual}[1]{\ensuremath{{#1}^{\bot}}}
\newcommand{\invpt}[1]{\dual{#1}}
%\newcommand{\opt}[1]{{#1}{}^?} %{(=)}}
%\newcommand{\rtc}[1]{{#1}^\ast}
\newcommand{\rti}[1]{\ctr{#1}}
%\newcommand{\tc}[1]{{#1}^+}
\newcommand{\win}{\mbox{\textit{win}}}
\renewcommand{\sin}{\mbox{\textit{sin}}}
\newcommand{\graph}[1]{\ensuremath{\mbox{\bf graph}\,#1}}
\newcommand{\guard}[1]{\ensuremath{\mbox{\bf guard}\;#1\,}}
\newcommand{\assign}[1]{\ensuremath{\mbox{\bf assign}\,#1}}
% Logic
\newcommand{\logand}{\mathop{\wedge}}
\newcommand{\logor}{\mathop{\vee}}
\newcommand{\lognot}{\mathop{\sim}}
\newcommand{\logimp}{\mathop{\Rightarrow}}
\newcommand{\logall}[1]{\mathop{(\forall #1)\,}}
\newcommand{\logsome}[1]{\mathop{(\exists #1)\,}}
\newcommand{\inl}[1]{\ensuremath{\mathbf{i}\,#1}}
\newcommand{\inr}[1]{\ensuremath{\mathbf{j}\,#1}}
\newcommand{\True}{\ensuremath{\mathit{True}}}
\newcommand{\False}{\ensuremath{\mathit{False}}}
% Math ligatures (mathlig package): multi-character ASCII sequences are
% rewritten to symbols in math mode.
% NOTE: the single-character ligatures for < and > below make every bare
% < and > in math render as \langle/\rangle; write \lhd, \leq etc.
% explicitly wherever an ordinary comparison sign is meant.
\mathlig{->}{\to}
\mathlig{=>}{\Rightarrow}
\mathlig{<|}{\lhd}
\mathlig{<}{\langle}
\mathlig{>}{\rangle}
\mathlig{<=>}{\Longleftrightarrow}
%\mathlig{==}{\hspace{1ex}=_{\mbox{\small def}\hspace{1ex}}}
\mathlig{==}{\stackrel{\scriptscriptstyle \Delta}{=}}
\mathlig{|=}{\ensuremath{\mathrel{\epsilon}}}
\mathlig{|><}{\ensuremath{\ltimes}}
% -+-> : a "partial function" arrow built by overprinting + on \rightarrow.
\mathlig{-+->}{\ensuremath{\mathop{\makebox[0cm][l]{+}%
\makebox{$\rightarrow$}}%
}}
\mathlig{|->}{\ensuremath{\mapsto}}
\mathlig{><}{\ensuremath{\times}}
\newcommand{\overwrite}{\ensuremath{\mathop{\mbox{\ overwritten by\ }}}}
%
% To be merged...
%\newcommand{\Fam}[1]{\ensuremath{{\mathbb{F}}#1}}
%\newcommand{\Pow}[1]{\ensuremath{{\mathbb{P}}#1}}
%\newcommand{\SING}[1]{\ensuremath{\{\,#1\,\}}}
%\newcommand{\SET}[2]{\ensuremath{\{\,#1\,|\,#2\,\}}}
%\newcommand{\FamFam}[1]{\Fam{(\Fam{#1})}}
%\newcommand{\Set}{\ensuremath{\mbox{Set}}}
%\newcommand{\inl}[1]{\ensuremath{\mbox{in}_0\,#1}}
%\newcommand{\inr}[1]{\ensuremath{\mbox{in}_1\,#1}}
%\newcommand{\angelU}[1]{\assert{#1}}
%\newcommand{\demonU}[1]{\assume{#1}}
%\newcommand{\BLANK}{\ensuremath{\mbox{\small \_\,}}}
%\newcommand{\C}[1]{\mbox{\textit{#1}}}
%\newcommand{\id}{\mbox{id}} % function
%\renewcommand{\eq}{\mbox{(=)}} % relation
%%\newcommand{\SKIP}{\mbox{id}} % pt
%\newcommand{\MAGIC}{\mbox{magic}}
%\newcommand{\ABORT}{\mbox{abort}}
%\newcommand{\SEQ}{\ensuremath{\mathrel{;}}}
\newcommand{\FUN}[1]{\ensuremath{(\lambda\, #1)}}
%\newcommand{\assert}[1]{\ensuremath{\{ #1 \}}}
\newcommand{\assert}[1]{\ensuremath{\langle #1 \rangle}}
\newcommand{\assume}[1]{\ensuremath{[\, #1\, ]}}
%\newcommand{\reverse}[1]{\ensuremath{\mbox{$#1$}^{\sim}}}
%\newcommand{\inv}[1]{\reverse{#1}}
%\newcommand{\invpt}[1]{\ensuremath{\mbox{$#1$}^{-}}}
%\newcommand{\graph}[1]{\ensuremath{\mbox{$|#1|$}}} % relation
%\newcommand{\assign}[1]{\ensuremath{\langle #1 \rangle}} % pt
%\newcommand{\guard}[1]{\ensuremath{ #1 \mathbin{\upharpoonleft}}} % relation
\newcommand{\IF}[3]{\ensuremath{
\;\mbox{\textbf{if}}\; #1 \;
\mbox{\textbf{then}}\; #2 \; \mbox{\textbf{else}}\; #3}}
\newcommand{\WHERE}[1]{\;\mbox{\textbf{where}} \;#1}
\newcommand{\plift}[1]{\ensuremath{\mbox{\textbf{test}\,}#1}}
\newcommand{\test}[1]{\plift{#1}}
\renewcommand{\graph}[1]{\ensuremath{\mbox{\bf graph}\,#1}}
\renewcommand{\guard}[1]{\ensuremath{\mbox{\bf guard}\,#1\,}}
\renewcommand{\guard}[1]{\ensuremath{#1 \rightharpoonup {}}}
\renewcommand{\assign}[1]{\ensuremath{\mbox{\bf assign}\,#1}}
% Alternate notations for assert/assume.
%\renewcommand{\assert}[1]{\ensuremath{(\mbox{\bf assert}\;#1)}}
%\renewcommand{\assume}[1]{\ensuremath{(\mbox{\bf assume}\;#1)}}
%\renewcommand{\assert}[1]{\ensuremath{< #1 |}}
%\renewcommand{\assume}[1]{\ensuremath{| #1 >}}
%%%math ligatures<<<1
%%% doesn't work if I put those before the \begin{document} ???
% NOTE(review): this group largely repeats the \mathlig declarations made
% earlier in the preamble (->, =>, <|, <, >, <=>, |=, ==, |->), with a few
% additions (<=, :=) and a spaced variant of ==.  The later declaration of
% a given ligature wins; confirm mathlig tolerates redefinition without
% complaint, and consider merging the two groups.
\mathlig{->}{\to}
\mathlig{=>}{\Rightarrow}
\mathlig{<=}{\Leftarrow}
\mathlig{<|}{\lhd}
\mathlig{|><}{\ltimes}
\mathlig{<}{\langle}
\mathlig{>}{\rangle}
\mathlig{<=>}{\Longleftrightarrow}
\mathlig{|=}{\ensuremath{\mathrel{\epsilon}}}
%\mathlig{==}{\ensuremath{\;\mathrel{=_{def}}\;}}
\mathlig{==}{\;\stackrel{\scriptscriptstyle \Delta}{=}\;}
\mathlig{|->}{\mapsto}
\mathlig{:=}{\mathrel{\texttt{:=}}}
\begin{document}
\maketitle
\tableofcontents
\newpage
\section{Introduction}
The notion of `interface' pervades the use and construction of many kinds of
artefact, particularly computer programs. A common form of interface
is a command-response or imperative interface. In such an interface
the user issues commands, to which the system responds, in strict
alternation. In computer software, issuing a command is realised by
passing control to a procedure or method, and a response by
returning control when the procedure terminates.
Programmers rarely write self-standing programs. Typically, they rely
on resources of various kinds, such as libraries\footnote{Commonly
shared, dynamically linked libraries.}, the primitives of an
operating system or language `runtime', and low-level interfaces or
APIs (Application Programming Interfaces). They may even be
\emph{writing} libraries, or device drivers rather than user-level
executable programs. They usually collaborate in teams with other
programmers, each responsible for some piece of a large system.
Even when a single person writes software to run directly `on the
metal', they divide the problem into brain-sized chunks. The boundaries
of these chunks are interfaces.
The product of a programmer's effort is in reality always something
partial, a \emph{component} of a larger \emph{system}. Commonly
this takes the form of a module, exporting one `high-level' interface
(for example files and directory trees) by making use of another `low-level'
interface (for example disk drives and segments of magnetic media).
Sooner or later every programmer is bound to ask themselves
``what \emph{is} an interface?'', and ``what \emph{is} a component?''.
This paper offers an answer to these questions in the
form of a mathematical structure, namely an enriched category in which
the objects represent command-resource interfaces, and
the morphisms represent components that map (or reduce) one interface
to another. The hom-sets are partially ordered, and the order represents
a certain kind of refinement or improvement between components.
It is characteristic of a program component or partial construction
that it has two `poles', like the poles of a magnet. The poles of a
component are \emph{interfaces} -- one that is imported, which the
component requires or relies on, and one that is exported, which it
supplies, provides or implements (on `top' of the imported interface).
There is, as it were, a conditional guarantee: the exported interface
will work properly \emph{provided} that the imported one works
properly. One may perhaps think of the exported interface as the
positive pole (something valuable), and the imported interface as the
negative (a cost).
In practice the imported and exported interfaces of a real component
commonly have a great deal of structure, and may be subdivided in
various ways, and combined with administrative data. In detail,
problems may arise in finding suitable components, wiring them up,
organising their start-up. There are no solutions to these problems
in what follows\footnote{Subdivision of interfaces is, I presume,
related to products and sums of interfaces.}.
%\paragraph{The big picture}
Setting many problems aside, a picture for the programmer's
task is this:
\begin{picture}(400,150)(-25,75)
\renewcommand{\downarrow}{\mbox{\hspace{1em}}}
\put(100,125){\framebox(100,50)}
\put(60,150){\makebox(0,0){\shortstack{Export $\sqsubseteq$ %\\ $E$
}}}
\put(240,150){\makebox(0,0){\shortstack{$\sqsubseteq$ Import%\\ $I$
}}}
\put(100,177){\oval(50,50)[tr]}
\put(125,177){\vector(0,-1){0}}
\put(125,178){\makebox(0,0)[b]{$\downarrow c$}}
\put(200,177){\oval(50,50)[tl]}
\put(175,177){\vector(0,-1){0}}
\put(175,178){\makebox(0,0)[b]{$\downarrow \overline{r}$}}
\put(200,123){\oval(50,50)[bl]}
\put(200,98){\vector(1,0){0}}
\put(175,117){\makebox(0,0)[b]{$\downarrow \overline{c}$}}
\put(100,123){\oval(50,50)[br]}
\put(100,98){\vector(-1,0){0}}
\put(125,117){\makebox(0,0)[b]{$\downarrow r$}}
%\put(100,150){\makebox(0,0)[r]{$\Longleftarrow$}}
%\put(200,150){\makebox(0,0)[l]{$\Longleftarrow$}}
\put(150,200){\makebox(0,0){Input}}
\put(150,100){\makebox(0,0){Output}}
%\put(150,150){\makebox(0,0){ $\sqsubseteq$ }}
%
%\qbezier(200,202)(225,202)(240,150)
%\qbezier(240,150)(255,98)(280,98)
%\put(280,123){\oval(50,50)[br]}
%\put(280,98){\vector(-1,0){0}}
%
%\qbezier(200,98)(225,98)(240,150)
%\qbezier(240,150)(255,202)(280,202)
%\put(280,177){\oval(50,50)[tr]}
%\put(305,177){\vector(0,-1){0}}
%
%\put(280,175){\line(1,0){50}}
%\put(280,125){\line(0,1){50}}
%\put(280,125){\line(1,0){50}}
\end{picture}
The task is to `fill the box'. In this picture the horizontal
dimension shows interfaces. The exported interface is at the left
(the `outside'), and the imported interface at the right (`the
inside'). The vertical dimension shows communication events (calls to
and returns from procedures), with data flowing from top to bottom:
$c$ and $\overline{r}$ communicate data from the environment, while
$r$ and $\overline{c}$ communicate data to the environment. The
labels $c$ and $r$ constitute events in the higher level interface,
while $\overline{c}$ and $\overline{r}$ are at the lower level. The
pattern of communication is that first there is a call $c$, then some
number of repetitions of pairs $\overline{c}\overline{r}$, then
finally a return $r$. One may think of the $c$'s and the $r$'s as
opening and closing brackets of two different kinds (\eg{} round and
square brackets) in a language of balanced strings denoted by the
regular expression:
\[
(c (\overline{c} \overline{r})^\ast r)^\ast
\]
This regular expression abstracts away the nature of the calls and
returns, and the way they depend on the previous history of
interactions.
The picture this gives of the assembly of a complete system
% from software and hardware components
is that one has a series of to-be-filled boxes, with input arrows
linked to output arrows with wiring reminiscent of the Greek
letter $\chi$ or a cross-bar. This cross-bar stands for composition,
or putting together components to form a complete system. It
plays a similar role for interactive systems to the piping combinator
for Unix `filter' programs that consume and produce streams.
\setlength{\unitlength}{0.30mm}
\begin{picture}(400,140)(-50,85)
\renewcommand{\downarrow}{\mbox{\hspace{1em}}}
\put(100,125){\framebox(100,50)}
%
\put(-5,178){\makebox(0,0)[b]{$\downarrow \overline{r}$}}
\put(-5,117){\makebox(0,0)[b]{$\downarrow \overline{c}$}}
\put(20,177){\oval(50,50)[tl]}
\put(20,123){\oval(50,50)[bl]}
\put(-5,177){\vector(0,-1){0}}
\put(20,175){\line(-1,0){50}}
\put(20,125){\line(0,1){50}}
\put(20,125){\line(-1,0){50}}
\put(20,98){\vector(1,0){0}}
%
\qbezier(20,202)(45,202)(60,150)
\qbezier(60,150)(75,98)(100,98)
\put(100,123){\oval(50,50)[br]}
\put(100,98){\vector(-1,0){0}}
%
\qbezier(20,98)(45,98)(60,150)
\qbezier(60,150)(75,202)(100,202)
%\put(60,150){\makebox(0,0){\shortstack{Export\\ $E$}}}
%\put(240,150){\makebox(0,0){\shortstack{Import\\ $I$}}}
\put(100,177){\oval(50,50)[tr]}
\put(125,177){\vector(0,-1){0}}
\put(125,178){\makebox(0,0)[b]{$\downarrow c$}}
\put(200,177){\oval(50,50)[tl]}
\put(175,177){\vector(0,-1){0}}
\put(175,178){\makebox(0,0)[b]{$\downarrow \overline{r}$}}
\put(200,123){\oval(50,50)[bl]}
\put(200,98){\vector(1,0){0}}
\put(175,117){\makebox(0,0)[b]{$\downarrow \overline{c}$}}
\put(100,123){\oval(50,50)[br]}
\put(100,98){\vector(-1,0){0}}
\put(125,117){\makebox(0,0)[b]{$\downarrow r$}}
%\put(100,150){\makebox(0,0)[r]{$\Longleftarrow$}}
%\put(200,150){\makebox(0,0)[l]{$\Longleftarrow$}}
%\put(150,200){\makebox(0,0){Input}}
%\put(150,100){\makebox(0,0){Output}}
%\put(150,150){\makebox(0,0){ $E \sqsubseteq I$ }}
\qbezier(200,202)(225,202)(240,150)
\qbezier(240,150)(255,98)(280,98)
\put(280,123){\oval(50,50)[br]}
\put(280,98){\vector(-1,0){0}}
%
\qbezier(200,98)(225,98)(240,150)
\qbezier(240,150)(255,202)(280,202)
\put(280,177){\oval(50,50)[tr]}
\put(305,177){\vector(0,-1){0}}
\put(280,175){\line(1,0){50}}
\put(280,125){\line(0,1){50}}
\put(280,125){\line(1,0){50}}
\put(305,178){\makebox(0,0)[b]{$\downarrow c$}}
\put(305,117){\makebox(0,0)[b]{$\downarrow r$}}
\end{picture}
\paragraph{What is an interface?}
%The question that is immediately raised is `What is an interface?'.
The notion of an interface pervades the use, design and implementation
of devices of all kinds.
The structure below, whose presentation needs to be better organised,
is my best effort so far to define a mathematical structure which can
serve as a model (in the sense of applied mathematics) for a
particular important common ingredient in the notion of interface.
The word `interface' is extremely broad, with many uses and shades of
meaning covering communication phenomena in the use of artifacts of
all kinds. It is inevitable that we have to restrict ourselves to a
specific kind of interface, while trying to retain enough generality
to cover a significant amount of some important ingredient of the
unanalysed intuitive notion. Many phenomena will be out of scope, and
indeed some that are important in practice, which we would like to
analyse and understand. So be it.
\paragraph{The form of our answer}
The structure takes the form of a category (with a partial order on
the hom-sets), in which the objects model interfaces, and the
morphisms model software components, from which entire systems may be
built or configured.
What use is a category? Given a little infra-structure, and some
syntax, a category serves as the basis for a type-theory. Thus we
should have a type-theory in which the types (objects of the category)
represent interfaces, and the terms (morphisms between objects)
represent components.
The hom-sets represent, so to speak, the design space for a component;
they are partially ordered, and have a rich structure in which the
order represents the relation of replaceability or refinement between
components with the same import and export interfaces. This structure
may serve as the basis for a refinement calculus, in which equations
and inequations between components are proved by algebraic calculation.
\paragraph{Procedural interfaces}
The notion of interface to be captured in our model can be called a
\emph{procedural}, or \emph{imperative}, or \emph{command-response}
interface. In such an interface, there are two parties or
agents, that have asymmetric or dual \roles: one is the \emph{client}
(who has the initiative in all interactions), and one is the
\emph{server} (who speaks only when spoken to).
There is an exchange of a pair of messages. The client starts by issuing an instruction (or command), that the server
then commits or performs, returning a response or acknowledgement. Issuing an
instruction is very often realised by passing control to some
procedure in a program module, and committing the instruction by
returning control back to the calling program with output variables
set up. My habit is to use the terminology of commands and responses
for both the events by which these imperative interactions are
initiated or completed, and also for the values or messages that are
exchanged.
\paragraph{Atomicity}
The model described here has a severe shortcoming\footnote{which one
might hope to remove, or at least to clarify} with respect to the
requirements of practical component-based systems: the interaction
steps which we shall study are \emph{atomic}. Although calls and
returns are different successive events in the evolution of an
interface, yet the response to a command must follow it without any
intervening event \emph{in that interface}, such as a recursively
nested call of some
kind, or a call from some other %independently scheduled
thread of activity in the running program.
% other than the calling thread.
\paragraph{Behavioural substance, not syntax}
It should be emphasised that what we are looking for here is \emph{not}
the syntactic `shape' of a command-response interface.
That is in practice coded using an interface description language such
as IDL. The information encoded is what is needed for marshalling
data for possible remote transmission, for error propagation, \etc{}.
This is a matter of parsing and unparsing.
Rather, what we want to capture is input-output behaviour, abstracting from
details of how input arguments and results are encoded. By input
output behaviour I mean a contract on which the programmer of a
component which imports the interface may rely, and which the
programmer of a component which exports the interface must guarantee.
We choose to ignore the way in which messages are encoded in a state
\footnote{Perhaps this can be studied using simulations, or interface
refinements.}.
%In these notes, the preceding remarks will have to do by
%way of motivation for the definitions that follow.
\paragraph{questions of constructivity and predicativity}
It may be of some interest that the category can be defined in a
constructive and predicative setting.
One reason for working in a constructive
framework is that a model set up in such a framework for program
components is automatically a `working' model, which in a sense `goes'.
A constructive proof is \textit{ipso facto} a program.
In principle, one might write executable program components in
constructive type-theory, and exploit type-checking to ensure
that they behave correctly.
In practice, one has to write programs in programming languages
other than constructive type theory. To propose otherwise would
be Quixotic.
The contribution dependent type theory can make to the practice of
software engineering may lie more at the level of specification and
high level design, than at the level of implementation, code, or
verification. Types play no real role when a program is running.
It may be more important that one can
\emph{document} the interfaces between which components are programmed
(using C, C++ or Java perhaps as a programming language) by exploiting
the notion of dependent type as analysed in constructive type theory,
with its universes and inductively defined families of sets.
As Goebbels might have said, to gain control over things, one must
first gain control of the language in which we specify them.
One can raise the question of equality only in a predicative
framework. In such a framework the power of a set need not be a set
(over which we may quantify in forming propositions) but can be a
`large' object, akin to a proper class. In an impredicative
framework, it is trivial to define singleton predicates, \ala{} Leibniz.
%$\{s\} = \cap \SET{P : \Pow{S}}{a |= P}$
$a |= \{s\} <=> \prod{U : \Pow{S}} s |= U -> a |= U$
I regard it as an advantage of a predicative framework that
one can isolate the operation of forming singleton predicates
as a separable logical ingredient of the idea of an inductive
definition.
It is a disadvantage of a predicative framework that one loses access
to the formally straightforward impredicative foundation for both
inductive definitions with definition by well-founded recursion, and
co-inductive definitions with definition of potentially infinite
objects (and relations with which to compare them) by forms of
non-wellfounded recursion.
\newpage
\paragraph{Sources}%
\begin{itemize}
\item Hoare's process calculus CSP\cite{Ho85}. This has a
variable-binding constructor `choice' that resembles the supremum
operation of a $W$-type (apart from well-foundedness). Hoare's book
sketches an implementation of certain CSP programs (vending
machines, etc) using a lazy functional language. As they run, the
user makes choices from successively presented menus. The presentation of a
menu, followed by selection within it, is a pattern of alternating
communication.
\item dependent state machines. One can extend the usual notion of
(deterministic) state machine in an interesting direction, by
allowing the set of commands to depend on the current state, and the
set of responses to depend on the state and input command. If one
thinks of the transition function of a (Mealy) state machine as something of
type
\[ S >< C -> R >< S
\]
and then allows the set $C$ of commands to depend on the initial state,
and the set $R$ of responses to depend on both the initial state
and the input command, one arrives at the type
\[ (s : S,c : C(s))-> R(s,c) >< S
\]
This is the type of functions which map a state and a command to a
response and new state. These play a key \role{} in server programs,
as discussed below (somewhere). If one starts instead with Moore
machines, one obtains a type that plays an analogous \role{} for
client programs.
Another point of departure is the notion of non-deterministic state
machines, and its analysis with dependent types.
\item the `tree-sets' of Petersson and Synek \cite{PS89}. Their trees were
introduced as parse trees for a certain kind of formal grammar. They provide the
right setup for describing interaction structures, and associated
notions. This setup can represent a formal grammar in which
a number of syntactical categories are defined in terms of unions of
sequential patterns:
\[
\begin{array}{ll}
S : \Set & \mbox{syntactical categories} \\
C : S -> \Set & \mbox{alternates, per category} \\
R : (s : S)->C(s) -> \Set & \mbox{conjuncts, per cat/alt}\\
n : (s : S, c : C(s))-> R(s,c) -> S & \mbox{category of conjunct}
\end{array}
\]
They gave a description of the syntax trees induced by such a
structure. This is a simultaneous inductive definition of an
indexed family of sets of trees, where the index set is fixed ($= S$).
The definition they gave is the `nub' of the reflexive transitive
closure operator $\rtc{}$ on interaction structures.
\item several remarks by Thierry Coquand, particularly concerning
the importance and interest of simulation relations.
\item formal topology as developed over more than a decade by Giovanni
Sambin, recently with several collaborators. Interaction structures
provide examples of Sambin's `basic'-topologies (check!), giving yet
more evidence of the interest of this notion. A `basic' topology is
a little wilder than the ordinary kind of topology. Cite something.
Indicate something about Distributivity? Localisation?
\item Back and von Wright's refinement calculus \cite{BvWRC}.
This is a calculus for reasoning about inclusion between predicate
transformers (also relations \etc{}). It has a highly algebraic flavour,
and smooth ergonomical features necessary for a working calculus.
Interaction structures are essentially predicate transformers,
presented in a concrete, computational form. Predicate transformers
have been linked with the semantics of programming languages since
the earliest investigations into program correctness. The key thing
a prospective client wants from a specification of an imperative
interface is a predicate transformer that lets the client work out
what has to be established first, in order to establish (bring
about) a desired predicate in a single cycle or operation of the
interface.
I diverge from Back and von Wright's notation in a number of
respects, primarily in using ``angle brackets'' $\angelU{\BLANK}$
for angelic update. Back and von Wright use $\{\BLANK\}$, using
angle brackets for functional updates.
\item the insights and collaboration of Anton Setzer.
Anton defined a notion of `redirection' between
interfaces that is close if not identical to the notion of simulation
defined here. He has studied the situation in which
state-dependency is absent. He has studied the question of
justifying the use of coinductively defined notions in dependent
type theory.
\item the insights and collaboration of Pierre Hyvernat. Pierre has
connected interaction structures with Sambin's basic topologies:
this suggested the idea of coarsening the equivalence relation on
simulations from extensional equality to extensional equality of
saturations. He has also connected them with certain notions in
Girard's ludics, linear logic, and game-theoretical
approaches to program semantics. He has established several
properties of the category of interfaces and simulations, and shown
that this category provides (at least in an impredicative and
classical setting) a model for full linear logic.
\end{itemize}
%\subsection{Associated notes}
%Both the following are frequently updated, and will probably be
%amalgamated.
%\begin{itemize}
%\item
%\texttt{http://www.dcs.ed.ac.uk/pgh/pt.pdf} Clearer definitions, laws.
%\item
%\texttt{http://www.dcs.ed.ac.uk/pgh/summary.pdf} This.
%\end{itemize}
\section{Preliminaries and notation}
\label{sec:prel-notat}
The following is an incomplete collection of basic
notions, notations and terminology needed to present the category.
\subsection{Two notions of subset}
\label{sec:two-notions-subset}
We distinguish two notions of subset, or more precisely
two ways in which a subset of a set can be given. To present them immediately, they are
as follows.
\[
\begin{array}[t]{l}
\SET{s : S}{P(s)} \\
\SET{s_i}{i : I}
\end{array}
\]
The first picks out the subset's elements, according to whether they satisfy
a predicate. The second runs through them all, as $i$ varies over $I$.
It is enlightening to present the distinction between these two notions as
a contrast between functors of different variance, called here $\Fam{\BLANK}$
and $\Pow{\BLANK}$.
In a predicative context, they are endofunctors on a `large'
category of types\footnote{It is necessary to use such a large
category only because we want to allow that the powerset of a set
need not be a set. Something else we can do is localise things to a
universe.} which has not only normal mathematical sets for its
objects, but also the type $\Set$ itself, and types built on top of it
by dependent function and product types. The morphisms between such
objects are functions, that map a domain-type into a codomain type.
Our two endofunctors are as follows.
\begin{itemize}
\item The \textbf{propositional} power $\Pow{\BLANK}$. Here one
thinks of a subset of a set $S$ as a propositional function, predicate or
criterion which distinguishes those elements of $S$ which
are members of the subset from those which are not. (Sheep and Goats.)
% by a characteristic predicate or criterion.
%
% In accordance with the Curry-Howard correspondence identify a
% proposition with the set of its proofs, and extend the idea of
% propositional function to the more general idea of set-valued
% function or indexed family of sets. That a proposition is true or
% false means that the set of its proofs is non-empty in the sense
% that one has (some reliable method to construct) a proof.
\[ \begin{array}{l}
\Pow{\_} : \Type \to \Type \\
\Pow{S} == S \to \Set \\
\CC{typical form}\;\;\;\; P = \SET{s : S}{P(s)} : \Pow{S} \\[1em]
%
\Pow{\_} : (A, B : \Type)\to \hom(A,B) \to \hom(\Pow{B},\Pow{A}) \\
\Pow{f : A \to B} \SET{b : B}{P(b)}
% : \Pow{B})
== \SET{a : A}{ P(f(a))}
%: \Pow{A}
\end{array}
\]
Note that this functor is contravariant. Its operation on morphisms
is sometimes known as the inverse-image $f^{-1}$ or precomposition $(\cdot
f)$ operator. In programming, it is a
functional state update or assignment $s := f(s)$ .
The axiom which corresponds to this notion of subset in
ZF set-theory is the separation
schema, which guarantees that the subsets of a set picked out by
predicates over that set are themselves sets.
\item The \textbf{parametric} power $\Fam{\BLANK}$. Here one
thinks of a subset of a set or type $S$ as a morphism into $S$ defined
on some other `index' set. Here there is a parametric expression
$t(i)$ with a free variable $i$ (the parameter) varying over some
index set $I$.
\[ \begin{array}{l}
\Fam{\_} : \Type \to \Type \\
\Fam{S} == \mbox{the type of pairs}\; *\;\mbox{with}\;I : \Set, t : I -> S \\
\CC{typical form}\;\;\;\; ** = \SET{t(i)}{i : I } : \Fam{S}\\[1em]
%
\Fam{\_} : (A, B : \Type)\to \hom(A,B) \to \hom(\Fam{A},\Fam{B}) \\
\Fam{f : A \to B} \SET{t(i)}{i : I}
%< I : \Set, t(\BLANK) : I \to A >
== \SET{f(t(i))}{ i : I }
%: \Fam{B}
\end{array}
\]
Note that the functor $\Fam{\BLANK}$ is co-variant. Its operation on
morphisms is most often known as direct-image $\exists_f(U) = f(U) =
\SET{f(u)}{u |= U}$, or postcomposition : $(f \cdot)$. The axiom of ZF set-theory
that is most relevant to this notion of set is the
replacement schema, which guarantees that the images of sets under functional relations
are themselves sets.
\end{itemize}
\subsection{Identity}
\label{sec:identity}
The propositional and the parametric notions of subset of a set $S$ are linked via the relation of
equality, or propositional identity between elements of the set $S$.
The relation associates with any element $s$ of a set $S$ the
\emph{singleton} predicate $\{ s \}$ which holds of just that
element.
The singleton predicate `behaves with respect to other predicates'
as follows:
\[
\SING{s} \subseteq U == U(s) == \SING{s} \overlaps U
\]
One might also write this predicate using
section notation: either $(= s)$ or $(s =)$.
In the same notation, the equality relation is written $(=)$.
%This is the essence of the notion of
%a equation or \emph{propositional identity}.
If we abstain from or confine the use of this operation to regions
which are as small as possible, the two forms of the notion of subset fall into
relief.
To obtain a predicate from a family requires use of a relation such as
equality. To obtain a family from a predicate requires quantification
over states.
There are different approaches to propositional identity.
\begin{itemize}
\item that it is a reflection of judgment-level equality. This is the
approach taken in so-called extensional type-theory, in which a proof
of a propositional equation can be used to justify an equational judgement.
Adopting this approach raises problems for the mechanisation of
extensional type theory, as type-correctness is then undecidable.
\item that if $S$ is a set and if $s$ is an element of $S$,
then the singleton predicate $\{s\}$ is inductively definable as
the strongest (least) predicate satisfied by the value $s$.
This is the approach taken in intensional type-theory.
This approach is not entirely satisfactory, as in many sets (for
example sets of functions) intensional equality makes dubious
mathematical sense. Moreover proofs of intensional equations
have no computational meaning: we have no interest in their
canonical form, beyond in its mere existence.
\item that sets are intrinsically quotients. Depending on the set,
one defines \adlib{} an appropriate relation of equality between
elements of that set (\eg{} equiconvergence between Cauchy sequences
of rationals; or whatever seems appropriate, so long as it yields an
equivalence relation). Of course, one has then to confine oneself
to predicates and operators which depend only on the equivalence
class of a value, rather than on the representing value itself.
\end{itemize}
In some sense, identity (or the operator which produces a singleton
predicate) is a source or locus of non-computational phenomena in type
theory. I prefer for methodological reasons to confine use of the
general notion of singleton predicate (beyond its use in connection with
an explicit equivalence relation) to regions which are as small as
possible.
%It does not seem
%coherent\footnote{or even polite} to deny utterly any significance to
%the notion of general equality, or an operator which associates
%`polymorphically' to any set, the (doubly-indexed) family of equations
%between elements of that set. No entity without identity, whatever
%that means exactly.
The distinction between subsets \ala{} predicates and subsets \ala{}
families is grammatical: a distinction between forms of expression.
The grammatical distinction reflects the different use to which
predicate and family expressions are put. We use the predicate form
in connection with \emph{saying} something (forming a proposition),
whereas we use the family form in connection with \emph{doing} something
(computing a chosen element in the subset).
In the examples given later (??), we make use of yet another notion of
`singleton'. Given a set $S$ and an equivalence relation $=$ on $S$,
one can form for each $s \in S$ the set of elements of $S$ that are
equivalent to $s$, written $\{ s \}_{=}$ or $(= s)$. The elements of this set are
pairs, consisting of an element $s'$ of $S$ and a proof of the
equation $s = s'$. The set $\{s\}_{=}$ can be defined using the
existential quantifier with its constructive interpretation as $\sum{x
\in S} x = s$, or (in terms of the singleton predicate) as $\sum{x
\in S} x |= \{s\}$. Note that an element of a singleton set is a
value, together with a proof that it is in the appropriate equivalence
class.
If the equivalence is boolean-valued (decidable), the
canonical form of the proof of an equation can be of no interest: it
will be `tick', `ok', the sole element of the standard singleton set
-- a pure certificate.
The `interesting' part, if not the whole of an element of a singleton
set is the value itself.
\paragraph{Notation for predicate application}
I sometimes use the symbol $|=$ for `backwards' application
of predicates to values, so that if $P$ is a propositional subset of a
set $S$, and $s$ is an element of $S$, the proposition
\[ s |= P
\]
says that $P$ holds of $s$. The elements of $s |= P$ are just those
of $P(s)$.
\paragraph{Location of families with respect to predicates}
Even without use of propositional identity, it is possible to state
that a parametric subset $t = \SET{t(i)}{i : I} : \Fam{S}$ is
`located' with respect to a propositional subset $P = \SET{s : S}{P(s)} : \Pow{S}$
either by inclusion or overlapping: \\
\hspace{2cm}\begin{tabular}{l@{\hspace{1em}means\hspace{2em}}l}
$t \subseteq P$ & $(\forall i : I) P(t(i))$ \\
$t \overlaps P$ & $(\exists i : I) P(t(i))$
\end{tabular}\\{}
The splendid notation $A \overlaps B$ for overlapping (that hides an existential
quantifier in the same way as $A \subseteq B$ hides a universal quantifier) is
due to Giovanni Sambin.
Predicates can freely be compared with predicates by $\subseteq$ and
$\overlaps$. Other forms of comparison (for example between
parametric subsets) require use of the identity relation to replace a
family in `operating position' with a predicate.
\paragraph{Algebraic structure \versus{} computational content}
Predicates are closed under union and intersection of indexed
families, forming a distributive lattice, having all set-indexed suprema
and infima, notions of complement and implication. On the other hand
families are closed only under indexed union.
Predicates are connected with what we say, or specify: they support a
rich algebraic structure. Families are connected with what we do or
compute: they form an impoverished algebraic structure, which is the
price we pay for having something `computational' (a family) -- which
in this case consumes an externally selected index (or choice) and
produces a value.
%, rather than a
%something `specificational' (a predicate).
\subsection{Relations and abstract transition systems}
By a relation I mean a binary propositional function, or a predicate `with a parameter'.
A relation between $A$ and $B$ is an element of the (proper) type
\[
A -> \Pow{B}\;\; \iso % \mbox{which is isomorphic to}
\;\; \Pow{A \times B}
\]
The isomorphism will be referred to below as `Currying', as it is analogous to
replacing an ordered pair argument by two separate arguments for the
first and second coordinates.
A relation is \emph{homogeneous} if its two argument-places have the same type; for example the
equality relation on a set is homogeneous.
Roughly speaking, the algebraic structure of relations is that of predicates,
and then some. The additional `some' is essentially:
\begin{description}
\item[sequential composition] $R_1 \SEQ R_2$, with its identity $\SKIP$.
\item[division] $R_1 / R_2$. Together with sequential composition, this
satisfies the following adjunction:
\[ S \SEQ R_2 \subseteq R_1 \;\; \mbox{iff} \;\; S \subseteq (R_1 / R_2)
\]
\item[inversion] $\inv{R}$, which is an involution.
\item[closure operations] $\opt{R} $, $R^{+}$, $R^{\ast}$, for (respectively)
reflexive, transitive, and reflexive-transitive closure of a homogeneous relation $R$.
\item[graphs] %These include:\\
%(perhaps also without subscripts):\\ the empty relation
% `$\emptyR_{A,B}$' and trivial relation `$\fullR_{A,B}$'
% between given sets $A$ and $B$, \\
graphs\footnote{This needs the identity relation. A complete
account of assignment involves taking states to be records in which the
fields correspond to assignable variables.} of
functions $ f : A -> B$ written
%(perhaps without even the brackets too)
\[ \graph{ f } : A -> \Pow{B} \]
Note: As relations, the graphs of functions are both deterministic
(simple, at-most-one-valued, meaning $\inv{R} \SEQ R \subseteq 1$),
and total (entire, at-least-one-valued, meaning $1 \subseteq R \SEQ
\inv{R}$). As predicate transformers $\angelU{f} = \demonU{f} =
(\cdot f) = \Pow{f}$. (Such a predicate transformer commutes with all
intersections and unions.)
% the equality relation `${}=_A{}$' on a given set $A$, which is $< 1 > : A -> \Pow{A}$ \\
\item[tests]
A test is a relation in which the state does not change (and so
the relation must be a subset of the equality relation).
Tests are written
\[
\test{U}
\]
%Alternatively one may take just guarded assignments (partial
%functions) as basic.
Note: restriction of a relation in the domain or codomain can be effected by composing it with a test
on one side or the other.
\end{description}
%There are many other operations one can consider.
\paragraph{Spans.}
Any relation can be put into the form $\inv{(\graph{f})} \SEQ
\graph{g}$, that is of first the inverse of a function, then a
function. Such a pair of functions (essentially an element of $\Fam{A
\times B}$) is called a span, and a partial order can be defined
between spans which reflects inclusion between relations.
A similar trick works at the next level: one can factor any predicate transformer
into one which commutes with unions, followed by one which commutes with
intersections.
%
\paragraph{Categories of relations}
The category $\REL$ whose
objects are sets and whose morphisms are relations has a rich
structure on its homsets, particularly so when the relations are
homogeneous (\ie{} endomorphisms).
It helps to think first of the category of `arrows' over $\REL$, which
we might write $\REL^\downarrow$. The objects of this category are
relations $A -> \Pow{B}$ for types $A$ and $B$, and the morphisms from
$\alpha : A -> \Pow{B}$ to $\beta : C -> \Pow{D}$ are pairs of
relations $Q_1 : A -> \Pow{C}$, $Q_2 : B -> \Pow{D}$ such that we have
a subcommutative square
\[
\inv{Q_1} \SEQ \alpha \subseteq \beta \SEQ \inv{Q_2}
\]
%Here we ask for a subcommutativity property rather than a commuting diagram.
Note that the subcommutativity condition can also be written as a
simple inclusion:
$Q_1 \subseteq \inv{((\beta \SEQ \inv{Q_2}) / \alpha)}$.
%
We call this the category of relations and subcommutative squares between them.
When the domain and the codomain of a relation are the same, the
relation is said to be homogeneous. A homogeneous relation is the
same thing as an endomorphism in \REL. These form a category of
`cycles' over $\REL$, which we might write $\REL^\circlearrowright$.
The morphisms from $\alpha : A -> \Pow{A}$ to $\beta : B -> \Pow{B}$
are relations $Q : A -> \Pow{B}$ such that
\[
\inv{Q} \SEQ \alpha \subseteq \beta \SEQ \inv{Q}
\]
We call this the category of homogeneous relations and elementary
simulations. The word `elementary' is supposed to call to mind that a step
taken in the simulated homogeneous relation corresponds to a single
step in the simulating homogeneous relation.
We also need the notion of a pointed relation, or relation together
with a notion of `current' or `initial' state.
\footnote{Another idea that may need a name is a relation together with a
non-empty set of `start' or initial states.}
When the domain and the codomain are equal, and we additionally have
a distinguished initial state, the most natural course is to require
that simulations relate the initial states. We call this the category
of abstract transition systems.
`Abstract' because the transitions are not indexed by a set. An
unlabelled transition system is just a binary relation on a set,
whereas a labelled transition system is a family of relations indexed
by labels. (An `axiom set' in Giovanni's sense is the same as a
function $S -> \Fam{\Pow{S}}$, which is a version of \emph{labelled}
transition system in which one gives for each state $s$ the set $L(s)$
of labels on transitions from that state. Each label $l : L(s)$
determines a predicate holding of states to which there is a
transition from $s$: the transition itself is perhaps the proof that
the predicate determined by $s$ and $l$ has solutions.)
\subsection{Concrete transition structures and systems}
Transition structures are the concrete, computational counterpart of relations,
and relations the abstract, specificational counterpart of transition
structures.
%
Whereas a relation is something whose type has the form $A ->
\Pow{B}$, a transition structure is something whose type has the form
$A -> \Fam{B}$.
I shall use Greek letters $\phi$, $\psi$, \ldots to denote transition
structures. A transition structure $\phi
: A -> \Fam{B}$ is given by two pieces of data: a family of index sets
$\SET{T_\phi(a)}{a : A}$, and a corresponding family of indexing
functions
\[
a \mapsto \SET{a[i]_\phi}{i : T_\phi(a)}
\]
In a transition structure, a state is interpreted as a 1-dimensional
\emph{array}. (The index set may depend on the state.) In an
interaction structure it is interpreted as a 2-dimensional entity, or
generalised \emph{matrix}. (The generalisation is un-even-ness: that
the index set for a row may depend on the column.)
In comparison with relations, the algebraic structure of transition structures is
impoverished. They are closed under
neither inversion nor intersection. They are nevertheless closed
under indexed union, sequential composition, and in the homogeneous case
under reflexive, transitive
and reflexive-transitive closure.
Tangentially, transition structures support some arithmetic structure,
much the same as Cantor's order types. There are natural
definitions of zero, ordered addition, successor, multiplication, and
ordered sum that make one and multiplication a monoid, and zero and
addition a monoid commuting with multiplication on the right. The
operations preserve transitivity and well-foundedness.
Not only can transition structures be combined with other transition
structures, they can also be combined with relations in interesting
ways, yielding relations. That is to say, a transition structure can
be used as a relation-transformer. Among these uses are the
following; note that the definition of these operators
(pre-composition, post-division) does not require any use of the
identity predicate.
\[
(\phi \SEQ R) \hspace{1cm} (R / \phi )
\]
The definitions are as follows.
\[
\begin{array}{rclcl}
(\phi \SEQ R) a
&=& \SET{c}{\phi(a) \overlaps \inv{R}(c)} &=& \inv{(\angelU{\phi} \cdot \inv{R})} a \\
(R / \phi) a
&=& \SET{b}{\phi(b) \subseteq R(a)} &=& (\demonU{\phi} \cdot R) a
\end{array}
\]
In combination with inversion, these operators are sufficient to
define directly (\ie{} without use of identity) the notion of
(relational) simulation between concrete transition structures. A
simulation is a post-fixed point for a certain operator on relations
which can be defined using pre-composition and post-division by the
two transition structures.
Two transition structures $\phi : A -> \Fam{B}$ and $\psi : C \to
\Fam{D}$ determine a certain `backwards' relation transformer from
$B -> \Pow{D}$ to $A -> \Pow{C}$, whose value at a relation $R$ is the relation
which maps $a : A$ to the predicate:
\[
\SET{ c : C }{
(\forall i : T_\phi(a))
(\exists j : T_\psi(c))
R(a[i]_\phi, c[j]_\psi)}
\]
The relation transformer can also be written $R |-> \inv{((\psi \SEQ \inv{R}) / \phi)}$, or even
$((\inv{}) \cdot (/ \phi) \cdot (\psi \SEQ) \cdot (\inv{}))$ where `$\cdot$' stands for
composition of relation transformers.
%In fact
%we can define a category ``of arrows'' in which the objects are arrows of the form
%$\phi : A -> \Fam{B}$ and in which the morphisms from $\phi : A ->
%\Fam{B}$ to $\psi : C -> \Fam{D}$ are pairs of relations $Q_1 : A ->
%\Pow{C}$ and $Q_2 : B -> \Pow{D}$ such that $Q_1 \subseteq
%\inv{((\psi \SEQ \inv{Q_2}) / \phi)}$. Within it there is a
%category of cycles (with the same domain and co-domain), and simulations
%are morphisms in that category.
An \emph{elementary simulation} of transition structure $\phi : A ->
\Fam{A}$ by $\psi : B -> \Fam{B}$ is then a relation $Q : A -> \Pow{B}$
such that
\[
\inv{Q} \SEQ \phi \subseteq \psi \SEQ \inv{Q}
\]
A concrete transition system consists of a set $S$, a transition structure
$\phi : S -> \Fam{S}$, and an initial state $s_0 : S$. An elementary
simulation between transition systems is defined to be an elementary
simulation of the transition structure of one by the transition
structure of the other, which relates the two initial states. A (general)
simulation (also called a refinement) is defined to be an elementary simulation of one transition
system by the reflexive and transitive closure of the other.
\[
\mbox{\it Ref}[\tau_1 -> \tau_2] =
\mbox{\it Sim}[\tau_1 -> \tau_2^\ast]
\]
One could perhaps use the terminology ``linear'' instead of ``elementary''. Then it
is natural to call a morphism in
$\mbox{\it Sim}[\tau_1 -> \opt{\tau_2}]$ an ``affine'' simulation,
and $\mbox{\it Sim}[\tau_1 -> \rtc{\tau_2}]$ a general simulation.
The terminology isn't good.
\section{Objects: interfaces}
\label{sec:objects:-interfaces}
To recapitulate, we have two functors $\Pow{\BLANK}$ and $\Fam{\BLANK}$, and from
these two subtly different notions of relation: `true' relations $A ->
\Pow{B}$ and transition maps $A -> \Fam{B}$.
Relations (of either kind) can be presented as objects in a category
of `arrows' over the category of sets and relations, in which the
morphisms are sub-commutative squares.
When $A = B$, the arrows are in fact cycles, and
we have something that can be iterated, or combined with identity (equality) in various ways.
We say that we have a \emph{structure}.
When we also have a designated initial state, we say we have a \emph{system}.
Between systems (of either kind) we have a category in which the morphisms are simulation
relations that hold between the initial states.
A transition structure models a program whose progress from state $s :
A$ to state $s[t] : B$ requires choice of an index $t : T(s)$ (as it
were requires some external agency to supply the $t$'s in a one-way
form of interaction). A relation (in the propositional sense) models
a specification for a function $f : A -> B$, and need not be
executable or constructive in any sense.
So much for relations and transition structures, where there is one
level of choice; now we come to predicate transformers and interaction
structures. These introduce a second level (or dimension) of choice.
An interaction structure from a set $S$ to a set $S'$ is an object of type
\[
S -> \FamFam{S'}
\]
It is given by the following data:
\begin{itemize}
\item For each $s : S$ a set $C(s)$ of commands that may be issued in state $s$
\item For each $s : S$ and $c : C(s)$ a set $R(s,c)$ of responses to command $c$
issued in state $s$
\item For each $s : S$, $c : C(s)$ and $r : R(s,c)$ a state $n(s,c,r) : S'$.
\end{itemize}
For comparison, a predicate transformer from a set $A$ to a set $B$ is an
object of the type
\[
\begin{array}{ll}
& A -> \PowPow{B} \\
\iso & \CC{Curry} \\
& \Pow{A \times \Pow{B}} \\
\iso & \CC{flip arguments}\\
& \Pow{\Pow{B} \times A} \\
\iso & \CC{Curry, in reverse} \\
& \Pow{B} -> \Pow{A}
\end{array}
\]
Between predicate transformers\footnote{We are interested here in
monotone predicate transformers only}, we define
%(with quantification over predicates)
a partial order, by lifting the inclusion relation
between predicates ``pointwise'': $F \sqsubseteq G$ means $\prod{ X :
\Pow{B}} F(X) \subseteq G(X)$. Equality is then pointwise extensional equality.
Note that the definitions of inclusion and equality use quantification over predicates $X : \Pow{B}$.
In a predicative setting this raises some difficulties.
%; our aim is to replace
%$\sqsubseteq$ by an algebraically defined inequality, and to give
%a predicative analysis of other \primafacie{} impredicative notions.
An interaction structure $\Phi : S -> \FamFam{S'}$ determines two monotone predicate
transformers $\Pow{S'} -> \Pow{S}$, which I call disjunctive and conjunctive
normal form respectively. Also angelic and demonic. Also SigmaPi and PiSigma.
\[
\begin{array}{ll}
\angel{\Phi}, \demon{\Phi} : \Pow{S'} -> \Pow{S} \\
\angel{\Phi}(U) == \SET{s : S }{\sum{c : C(s)} \SET{s[c/r]}{r : R(s,c)} \subseteq U } \\
\demon{\Phi}(U) == \SET{s : S }{\prod{c : C(s)} \SET{s[c/r]}{r : R(s,c)} \overlaps U }
%\demon{P}(s) == \prod{c : C(s)}\sum{r : R(s,c)}P(s[c/r]) \\
\end{array}
\]
The former ($\angel{\BLANK}$, that is dnf) is more fundamental in the sense that
latter ($\demon{\BLANK}$, that is cnf) can be re-expressed in angelic form by
applying the axiom of choice.
We mention $\demon{\BLANK}$ again only in connection with inversion.
We do not explicitly write the function which coerces an interaction structure $\Phi$
to the predicate transformer $\angel{\Phi}$, where the ambiguity can be resolved by the context.
Observation: interaction structures are morphisms of a category IS in
which the objects are sets, and the homsets are lattices, closed under indexed suprema and infima.
Composition in the category is defined as follows.
\[
\begin{array}{ll}
C_{\Phi ; \Psi} \begin{array}[t]{l}
== \SET{s : S}{\sum{c : C_\Phi(s)}\prod{r : R_\Phi(s,c)}C_\Psi(s[c/r])}
\\
= \Phi(C_\Psi)
\end{array}\\
R_{\Phi ; \Psi}(s, < c, f > : \Phi(C_\Psi,s))
== \sum{r : R_\Phi(s,c)}R_\Psi(s[c/r]_\Phi,f(r)) \\
s[/]_{\Phi ; \Psi}
== (s[c/r]_\Phi)[f(r)/r']_\Psi
\end{array}
\]
The unit of composition we call $\SKIP$, which maps a state $s$
to the singleton family of the singleton family of $s$. (The point is
that we should have $\angel{\SKIP}$ the identity with respect to $\SEQ$.)
\[
\begin{array}{ll}
C_{\SKIP}(s)
== N_1 \\
R_{\SKIP}(s, \BLANK)
== N_1 \\
s[\BLANK/\BLANK]_{\SKIP}
== s
\end{array}
\]
Suprema are defined as follows.
\[
\newcommand{\foo}{\sup{i \in I}\Phi_i}
\begin{array}{ll}
C_{\foo}
\begin{array}[t]{l}
== \SET{s : S}{ \sum{i \in I} C_{i}(s) } \\
= \bigcup_{i \in I}C_{i}
\end{array}\\
R_{\foo}(s,< i, c >)
== R_{i}(s,c) \\
s[< i, c >/r]_{\foo}
== s[c/r]_{i}
\end{array}
\]
The supremum of the empty family is `abort', which is the worst
possible program for the angel. Thought of as a contract one might
make use of, it is entirely useless. Nothing is guaranteed.
Infima are defined as follows.
\[
\newcommand{\foo}{\inf{i \in I}\Phi_i}
\begin{array}{ll}
C_{\foo}
\begin{array}[t]{l}
== \SET{s : S}{ \prod{i \in I} C_{i}(s) } \\
= \bigcap_{i \in I} C_{i}
\end{array}\\
R_{\foo}(s,f) == \sum{i \in I }R_{i}(s,f(i)) \\
s[f/< i, r >]_{\foo} == s[f(i)/r]_{i}
\end{array}
\]
The empty infimum is sometimes called `magic', or `miracle'.
Such a resource could be used to accomplish the impossible.
(Pierre calls it `stop' .. check.)
When the codomain and the domain of an IS are the same (\ie{} it is
homogeneous, an endomorphism), there is a lot of further structure:
closure operations: $\opt{\BLANK}$ (reflexive closure), $\rtc{\BLANK}$
(transitive and reflexive closure), $\tc{\BLANK}$ (transitive
closure). These jointly satisfy
\[
\begin{array}{ll}
\opt{\Phi} = \SKIP \sqcup \Phi \\
\rtc{\Phi} = \opt{(\tc{\Phi})} \\
\tc{\Phi} = \Phi \SEQ \rtc{\Phi}
\end{array}
\]
They can be constructed entirely at the level of $C$'s and $R$'s.
%(Need better way of writing the following.)
The following comprises an inductive definition of the $S$-indexed family of sets
\[
C_{\rtc{\Phi}} : \Pow{S}
\]
This is followed by a definition of a function of type $\prod{s : S}C_{\rtc{\Phi}}(s)->\Fam{S}$
by (well-founded) recursion on the second argument $c$. Note that strictly speaking,
we presuppose a universe of sets which contains $N_1$ and is closed under $\sum{\ldots}$.
% the indexed family of sets
%\[
%\SET{s[c/r]_{\rtc{\Phi}}}{r \in R_{\rtc{\Phi}}(s,c) } : \Fam{S}
%\]
\[
\begin{array}{ll}
C_{\rtc{\Phi}}
\begin{array}[t]{l}
== \lfp{ U : S -> \Set } \prod{s : S}{ N_1 + \Phi(U,s) \subseteq U(s) } \\
= \rtc{\Phi}(\True)
% \sum{c : C_\Phi(s)}\prod{r : R_\Phi(s,c)}C_\Psi(s[c/r])}
\end{array}
\\
R_{\rtc{\Phi}}(s,\inl{1}) == N_1
\\
R_{\rtc{\Phi}}(s,\inr{< c, f >}) == \sum{r : R_{\Phi}(s,c)}R_{\rtc{\Phi}}(s[c/r]_\Phi,f(r))
\\
%R_{\rtc{\Phi}} == \begin{array}[t]{l}
% \lfp{ U : \prod{s : S}C_{\rtc{\Phi}}(s) -> \Set} \\
% R_{\rtc{\Phi}}(s,\inl{1}) == N_1 \\
% R_{\rtc{\Phi}}(s,\inr{}) == \sum{r : R_{\Phi}(s,c)}R_{\rtc{\Phi}}(s[c/r]_\Phi,f(r)) \\
% \end{array}
%\\
% \prod{ s : S, c : C_{\rtc{\Phi}}(s)} \ldots
%%%%%%%%%%%%%%(N_1 + \sum{r : R_{\Phi}(s,c)}U(s[c/r]_\Phi,f(r))
%%%%%%%%%%%%\subseteq U(s,c) \\
s[\inl{1}/1]_{\rtc{\Phi}} == s
\\
s[\inr{< c, f >}/< r, r' >]_{\rtc{\Phi}} == (s[c/r]_\Phi)[f(r)/r']_{\rtc{\Phi}}
\end{array}
\]
For each state $s$, the elements of $C_{\rtc{\Phi}}(s)$ can be seen as
`client'-programs. An agent running it issues a sequence of commands.
After each command, it waits for a response. The program then tells the
agent what to do next depending on the response. This is the
behaviour of a client. Eventually, there are no more commands to be
issued, there is an exit from the program and the run terminates.
(This the behaviour of a client which terminates.) Given such a
program $c$, the elements of $R_{\rtc{\Phi}}(s,c)$ can be seen as
traces, logs, or histories of responses to the program $c$ with the
property that they lead to an exit point of the program. They are
witnesses to the fact that there are `holes' or exits in the program.
(A `hole' should not be thought of as a defect; in fact `exit' is the
same as successful completion.) Each such trace $r$ determines a
state $s[c/r]_{\rtc{\Phi}}$, which is the state of the interface when
the program exits.
\paragraph{angelic and \demonic{} update}
Relations (of either the propositional or computational variety) lift
to predicate transformers -- in two ways. If $Q$ is a relation of
type $A -> \Pow{B}$, then we define two predicate transformers, for
which we adapt the notation of Back and von Wright.
\[
\begin{array}{ll}
\angelU{\BLANK},\demonU{\BLANK} : \Pow{B} -> \Pow{A} \\
\angelU{Q}(U : \Pow{B}) == \SET{a : A}{Q(a) \overlaps U} & \CC{angelic update, assertion}\\
\demonU{Q}(U : \Pow{B}) == \SET{a : A}{Q(a) \subseteq U} & \CC{\demonic{} update, assumption}
\end{array}
\]
\[
\newcommand{\foo}{\angelU{Q}}
\begin{array}{ll}
C_{\foo}
== \angelU{Q}(\True) \\
R_{\foo}(s,< s', q >) == N_1 \\
s[< s', q >/\BLANK]_{\foo} == s'
\end{array}
\]
\[
\newcommand{\foo}{\demonU{Q}}
\begin{array}{ll}
C_{\foo}(s) == N_1 \\
R_{\foo}(s,\BLANK) == s |= \angelU{Q}(\True) \\
s[\BLANK/~~~~]_{\foo} == s'
\end{array}
\]
Note that $\angelU{\phi}$ and $\demonU{\phi}$ make perfectly good sense
where $\phi$ is a transition structure. These operations can be
defined without use of singleton predicates.
\[
\newcommand{\foo}{\angelU{\phi}}
\begin{array}{ll}
C_{\foo} == T_\phi \\
R_{\foo}(s,t) == N_1 \\
s[t/\BLANK]_{\foo} == s[t]_\phi
\end{array}
\]
\[
\newcommand{\foo}{\demonU{\phi}}
\begin{array}{ll}
C_{\foo}(s) == N_1 \\
R_{\foo}(s,\BLANK) == T_\phi(s) \\
s[\BLANK/t]_{\foo} == s[t]_\phi
\end{array}
\]
The notation `$\angelU{\BLANK}$' is used instead of Back and von
Wright's brace notation $\{ \BLANK \}$, which collides with our use of set
theoretic use of braces. The notations $\angelU{\BLANK}$ and
$\demonU{\BLANK}$ are intended to be reminiscent of modal operators.
\paragraph{Assertions and assumptions of predicates}
An assertion of a predicate $U$ is an angelic update\footnote{The
angel is anyone on the test team, who has to ensure that a step
takes place which is described by the relation.} where the relation
does not change the state. (A `stuttering' step, in Lamport's terminology.)
The relation which constrains the angel's choice is the reflexive relation
$(\rest{\eq}{ U})$. The angel has therefore merely to establish that
$U$ holds -- there is no choice for the next state. Dual to
assertions of predicates, we also have assumptions, where the burden
of establishing the predicate is instead on the demon. The
generalisation of assertion and assumption from predicates
to general relations is clear.
%(Predicates are lifted to relation
%operators by using them as guards -- domain restriction -- particularly
%of the equality relation. Partial equivalence relations.)
\paragraph{Commuting with \ldots} Angelic update, being `existential',
commutes with arbitrary unions (is strict and defined by its action on
singletons). (This is a kind of degeneracy.) On the other hand
\demonic{} update, being `universal', commutes with arbitrary
intersections. (Also degenerate.)
Interestingly, although \demonic{} update does not in general commute
with unions, it is continuous (\ie{} commutes with unions of
\emph{directed} families) under the restriction that the relation is
total and image-finite (finite but non-empty range per element).
Continuity is an interesting property of predicate transformers,
which seems be close to the idea of \emph{implementability} for
interactive programs. Back and von Wright.
To restrict ourselves to continuous predicate transformers,
we have only to restrict \demonic{} updates to such relations, and
abstain from infinite $\sqcap$.
\paragraph{Accessibility and its dual} The following fundamental properties
of predicates with respect to a relation can be
defined as pre-fixed or post-fixed points of these update commands.
\[
\begin{array}{ll}
U \; \mbox{is $\phi$-progressive} & \demonU{\phi}U \subseteq U \\
U \; \mbox{is $\phi$-invariant} & U \subseteq \angelU{\phi}U
\end{array}
\]
The predicate transformer that assigns to a predicate $U$ the least
(\ie{} strongest) $\phi$-progressive predicate ${\cal A}(U)$ that
includes U, which is of course a closure operator, is called here the
\emph{accessibility} transformer. When $U$ is empty, the accessible
elements are also called well-founded.
%
Dually, the predicate transformer that assigns to a predicate $U$ the
greatest (\ie{} weakest) $\phi$-invariant predicate ${\cal I}(U)$ that
is still contained in $U$, which is of course an interior operator is
(here) called the \emph{safety} transformer. It picks out from a set
of states those elements from which there is an infinite stream of
$\phi$-transitions entirely in that set. When $U$ is the trivial
predicate which holds universally,
the safe states are non-wellfounded, in a `positive' sense.
\paragraph{Relational composition and division obtained from updates}
Sequential composition and division are closely connected to angelic
and \demonic{} update
respectively, %in the case of angelic update
\via{} inversion:
\[
\begin{array}[t]{ll}
(R_1 \SEQ R_2) &= \inv{(\angelU{R_1}\cdot (\inv{R_2}))} \\
(R_1 \SEQ R_2) a &= \angelU{\inv{R_2}}(R_1 a) \\
(R_1 / R_2) a &= \demonU{R_2}(R_1 a) \\
(R_1 \setminus R_2) &= \inv{(\demonU{\inv{R_1}} \cdot (\inv{R_2}))}
\end{array}
\]
In pointfree form:
\[
\begin{array}[t]{l}
(R_1 \SEQ) = (\inv{}) \cdot (\angelU{R_1} \cdot) \cdot (\inv{}) \\
(\SEQ R_2) = (\angelU{\inv{R_2}} \cdot) \\
(/ R_2) = (\demonU{R_2} \cdot) \\
( R_1 \setminus) = (\inv{}) \cdot (\demonU{\inv{R_1}} \cdot) \cdot (\inv{})
\end{array}
\]
Verification of the equations involving angelic update is
straightforward, starting from the following equations.
\[
\begin{array}[t]{ll}
(R_1 \SEQ R_2) a
& = \SET{c}{\inv{R_2}(c) \overlaps R_1(a)} \\
& = \angelU{\inv{R_2}}(R_1 a) \\
\inv{(R_1 \SEQ R_2)} c & = \SET{a}{{R_1}(a) \overlaps \inv{R_2}(c)} \\
& = \angelU{R_1}(\inv{R_2} c) \\
\end{array}
\]
The match-up with Giovanni's\footnote{beastly, horrid, \ldots} notations
seems to be as follows:
\begin{center}
\begin{tabular}{@{\hspace{1.5em}}ll@{\hspace{1.5em}}|@{\hspace{1.5em}}ll}
\multicolumn{2}{l}{\hspace{1em}\underline{Giovanni}} & \multicolumn{2}{l}{\hspace{-.5em}\underline{Back and von Wright}} \\[1.5ex]
% \hline
$ r^{-} $&\CC{existential anti-image} & $\angelU{r}$ & \CC{angelic update, assertion}\\
$ r^{\ast} $&\CC{universal anti-image} & $\demonU{r}$ & \CC{\demonic{} update, assumption}\\
$ r $&\CC{existential image} & $\angelU{\inv{r}}$ & \\
$ {r^{-}}^\ast $& \CC{universal image} & $\demonU{\inv{r}}$ & \\
\end{tabular}
\end{center}
The two notation styles do not harmonize nicely\footnote{To put it
mildly. The discord between ${}^{\ast}$ and the usual notation
for reflexive and transitive closure is ear-splitting.}.
(It does however seem to be safe to steal from Giovanni and write just
$R(U)$ for $\angelU{\inv{R}}\,U$, and hence $\inv{R}(U)$ for
$\angelU{R}\,U$; and this is a large part of the charm of Giovanni's
notation: the overloading of the notation for application to suit
the common case.)
Our category has for its objects triples:
\[
< S, \Phi : S -> \FamFam{S}, s_0 : S >
\]
which we call \emph{interaction systems}, or (tendentiously) \emph{interfaces}.
Next we turn to its morphisms.
\section{Morphisms: components}
\label{sec:morph-comp}
What is the appropriate notion of morphism between interaction systems?
It should be a ``partial'' implementation of its exported interface, that can make use of
an implementation of its imported interface. We analyse this notion as follows:
\begin{itemize}
\item first we define an elementary simulation to be a relation
between states equipped with a certain ``back and forth'' mapping
which translates export-commands to import commands and import responses
to export responses in such a way that the illusion of an implementation
of the domain interface can be perpetually maintained.
Elementary simulations also relate the initial states.
\item It turns out that the operation of transitive and reflexive closure
is (among other interesting operations) a monad in the category of
interaction systems and elementary simulations.
\item It may be that a command in the upper-level interface requires
zero, one or more interactions with the lower-level interface before
a high-level response is ready. We model this by moving to the
Kleisli category for the monad $\rtc{\BLANK}$. This is our `real'
category. A (general) simulation\footnote{This was called a refinement in my
notes with Pierre.} of interface $A$ by interface $B = < B, \Psi : B ->
\FamFam{B}, b_0 : B >$ is an elementary simulation of $A$ by the
interaction system $< B, \rtc{\Psi}, b_0 >$.
\item we define a poset structure between simulations. This again
makes use of the transitive and reflexive closure, this time to
close the set of low-level states that can simulate a high-level
state inductively -- this closure operation is called saturation.
We treat each simulation as equal to its saturation. We place on
simulations the associated partial order (\ie{} extensional
inclusion of saturations). This order is coarser than extensional
inclusion. Simulations form a complete lattice with respect to this
partial order.
\end{itemize}
\paragraph{Definition}
Let $\Phi : A -> \FamFam{A}$ and $\Psi : B -> \FamFam{B}$ be
two interaction structures. An elementary simulation of $\Phi$ by $\Psi$
is a relation $Q : A -> \Pow{B}$ which satisfies
\[
Q(a) \subseteq \bigcap_{c : C_\Phi(a)} \Psi (\bigcup_{r : R_\Phi(a,c)} Q(a[c/r]_\Phi))
\]
We now tease apart what this says.
The relation $Q$ should be such that for all $a$ and $b$,
\[
\begin{array}{llllll}
Q(a,b) -> & \\
& \prod{c:C_\Phi(a)} \\
& \sum{c':C_\Psi(b)} \\
& \prod{r':R_\Psi(b,c')} \\
& \sum{r:R_\Phi(a,c)} \\
& & Q(a[c/r]_\Phi,b[c'/r']_\Psi)
\end{array}
\]
This can, by using the axiom of choice, be pulled into a yet more
``computational'' form: there should exist, for all pairs of states $a : A$ and
$b : B$ such that $Q(a,b)$, functions
\[
\begin{array}{l}
\gamma : C_\Phi(a) -> C_\Psi(b) \\
\delta : %(a : A, b : B,
(c : C_\Phi(a))-> R_\Psi(b,\gamma(c)) -> R_\Phi(a,c) \\
\end{array}
\]
%(which we write in the following with their first two arguments implicit)
such that
\[ \begin{array}{l} (\forall\, %a : A, b : B,
c : C_\Phi(a), r : R_\Psi(b,\gamma(c))) \; %\to \\
% \hspace{2cm} Q(a,b) ->
Q(a[c/\delta(c,r)]_\Phi,b[\gamma(c)/r]_\Psi)
\end{array}
\]
If we have a simulation relation, then the proof that it is a simulation
can be used as a program to translate $\Phi$-commands to $\Psi$-commands,
and to translate the $\Psi$-responses so evoked to $\Phi$-responses
to the original command.
\paragraph{Properties of simulation}
There are a number of key properties of this notion of simulation. The
following shows that a simulation enjoys a ``sub-commutativity''
property (Lamport has the terminology `right-moving') in analogy with
the notion of simulation between relations.
\[ \angelU{\inv{Q}} \SEQ \Phi \sqsubseteq \Psi \SEQ \angelU{\inv{Q}}
\]
The following (which amounts to the same thing) states that a
simulation preserves the covering structure of an interface.
\[
\begin{array}{l}
U \subseteq \Phi(V) ->
\angelU{\inv{Q}} (U) \subseteq \Psi (\angelU{\inv{Q}}(V)) \\
\CC{which looks better in Giovanni-speak:}\;\;\;\; \\
U <|_\Phi V -> Q(U) <|_{\Psi} Q(V)
% a |= \Phi(U) -> Q(a) \subseteq \Psi (\angelU{\inv{Q}}(U)) \\
% \CC{which looks better in Giovanni-speak:}\;\;\;\; a <|_\Phi U -> Q(a) <|_{\Psi} Q(U)
\end{array}
\]
We compare elementary simulations by relational inclusion and extensional equality.
\paragraph{Monads for elementary simulation}
It then follows that the operations of reflexive and transitive
closure $\rtc{\BLANK}$, reflexive closure $\opt{\BLANK}$, and
transitive closure $\tc{\BLANK}$ are all monads in the category of
interaction structures and elementary simulations. These correspond
to different kinds of ``flexibility'' we can add to elementary
simulations.
\paragraph{General simulations}
So much for elementary-simulations between interaction structures
(from and to the same state-space). We now define a simulation
between interaction \emph{systems} (with an initial state) to be an
elementary simulation of the exported interface by the \emph{reflexive and
transitive closure} of the imported interface (allowing zero, one or
more calls to the imported interface). We should also have $Q(a_0,b_0)$.
That is, we take for our monad the reflexive and transitive closure,
which offers maximum flexibility. Furthermore, since we are dealing
with structures with designated initial states that we require to be
related, we do not have empty simulations.
\paragraph{Their ordering}
However the partial order we put on simulations is weaker (more
coarse) than relational inclusion. We define $Q \sqsubseteq Q'$
(between simulations by a saturated interface $\Psi = \rtc{\Psi}$) to mean
\[
\prod{a : A}\rtc{\Psi}(Q(a)) \subseteq \rtc{\Psi}(Q'(a))
\]
In Giovanni's terminology, we replace relations by their saturations. We use
the monad to define the partial order.
\section{Examples of interfaces}
The following sketched examples concern memory interfaces. These are
in a sense the simplest non-trivial (history sensitive) interfaces.
They are extremely well-behaved. They can be `idempotent' in a
certain sense: meaning that repetition of writes (or replaying
sequences of writes) is harmless. Memory interfaces are `universal',
in the sense that an arbitrary state machine can be implemented by
storing its state in a memory.
Sketch of :
\begin{itemize}
\item a simple memory cell.
\item a memory cell with `bad' writes.
\item an addressable array of (simple) memory cells.
\item an addressable array of memory cells with atomic update.
\item a `unix' memory array, with interruptible operations on
sets of memory cells.
\end{itemize}
Coming attractions (maybe): memory cells
with write timestamps; to make a change from memory cells, an arbitrary (Mealy)
state machine. There are some interesting examples of cached memory
in \cite{lampson94:_implem_coher_memor}. They are presented in a form
not too far from the one %that we adhere to
below.
Although there are examples of interfaces, there ought also
to be examples of simulations.
\subsection{Conventions for interfaces}
My habit is to use constructors to represent `opcodes' of a command
set. The operands are the input arguments for that opcode. If $C$ is
an opcode, then I use $\overline{C}$ as a constructor for replies to
an instruction with opcode $C$. (This is a slight notational
gesticulation towards CCS.)
In certain cases, one of the components of the input or output
operands is a proof, typically of a decidable (\ie{} boolean)
statement. We may suppose that no computational use is made of such
an operand: it is a sheer `certificate', that can be quoted in the
preparation of other such certificates. We know perfectly well what
the form of a proof of a boolean statement is.
\subsection{Partial functions}
Many examples require some machinery for partial functions. Partial
functions are relations which are single-valued on their domain.
When there are several memory cells, it may be that one has
access to certain sets of addresses simultaneously. We usually
have a total order on the addresses and may
have access to all addresses in the union of certain sets of
disjoint intervals simultaneously.
We have not only a set $A$ of addresses but also a family $I$ of
(decidable) subsets of $A$. $I$ should be closed under intersection
and symmetric difference\footnote{\ie{} $I$ should be a ring of
subsets of $A$.}. If a set of addresses is in $I$, then the
locations with those addresses can be accessed simultaneously.
In the following I write $A -+-> V$ for some implementation of the
partial functions from $A$ into $V$, where the domain of the partial
function is in $I$. (Equality on $V$ is written $\sim$.)
One may for example define $A -+-> V$ to be $A -> V^+$,
where $V^+$ is the set
\[
\SING{ \mbox{Unknown} } \cup \SET{\mbox{Known}\, v }{ v : V }
\]
Each such function $p : A -+-> V$ has a domain $\dom{p}$ which is a
decidable subset of $A$. Between partial functions $p, q : A -+-> V$
we have an operation of overwriting defined by
\[
(p \overwrite q) == [ a |= \dom{p} \cup \dom{q} |->
\mbox{\bf if} \; a |= \dom{q} \;
\mbox{\bf then}\, q[a]\,
\mbox{\bf else}\, p[a] ]
\]
I shall use $\sim$ for equality between partial functions.
%The domains of partial functions should form a ring of subsets of $A$
%- closed under relative complement, disjoint union, symmetric
%difference. I write this $I$. Partial functions should be closed
%under restriction by elements of $I$.
\subsection{Memory cell}
First we give an example of a simple memory cell. The part that says
that we have a memory cell is the singleton output set for a read
command (defined here to have the stored value as its only element).
\begin{description}
\item [states:] $s : V$. $V$ is the set of storable values. Let
$\sim_V$ denote equality on $V$. It is reasonable to take this to be
decidable.
\item[commands:] The set of commands is the same in every state\[
C(s) = \SING{\mbox{Read}} \cup \SET{\mbox{Write}\; v}{ v : V}
\]
\item[responses to \mbox{Read}:] In any state, there are the following
(correct) responses
\[
\begin{array}[t]{l}
R(s,\mbox{Read}) = \SET{\overline{\mbox{Read}}\;v\;x}{v : V, x : v \sim_V s} \\
s[\mbox{Read}/\BLANK] = s
\end{array}
\]
\item[responses to $(\mbox{Write}\; v)$:] There is only one reply:
\[
\begin{array}{l}
R(s,\mbox{Write}\;v) = \SING{ \overline{\mbox{Write}}} \\
s[\mbox{Write}\; v/\BLANK] = v
\end{array}
\]
\end{description}
\subsection{Faulty memory cell}
Next we give an example of a faulty memory cell. A bad write can `trash'
the stored value.
\begin{description}
\item[states:] $\SING{ \mbox{Bad}} \cup \SET{ \mbox{Ok}\,v }{ v : V}$.
\item[commands:] The set of commands is the same in every state\[
C(s) = \SING{\mbox{Read}} \cup \SET{\mbox{Write}\; v}{ v : V}
\]
Note that a read is always legal.
\item[responses to \mbox{Read}:] In any state, there is at most one
(correct) response
\[
\begin{array}[t]{l}
R(s,\mbox{Read}) = \SET{\overline{\mbox{Read}}\;v\;x}{v : V, x : s \sim_V \mbox{Ok}\,v } \\
s[\mbox{Read}/\BLANK] = s
\end{array}
\]
Note that read can return only if the last write was good; otherwise
a read may hang.
\item[responses to $(\mbox{Write}\; v)$:] There are two possible replies.
\[
\begin{array}{l}
R(s,\mbox{Write}\;v) =
\SING{ \overline{\mbox{Write}}}
\cup \SING{ \overline{\mbox{BadWrite}}}\\
s[\mbox{Write}\; v/\overline{\mbox{Write}}] = \mbox{Ok}\,v \\
s[\mbox{Write}\; v/\overline{\mbox{BadWrite}}] = \mbox{Bad}
\end{array}
\]
\end{description}
\subsection{Array of memory cells} Next we consider an array of memory cells which
can be individually read or written. (Basically we use $\sqcap$ here.)
Let $A$ be the set of addresses: it too should have decidable equality.
\begin{description}
\item [states:] $s : A -> V$
\item[commands:] The set of commands is the same in every state\[
C(s) = \SET{\mbox{Read}\;a}{a : A} \cup \SET{\mbox{Write}\; a \;v}{ a : A, v : V}
\]
\item[responses to $(\mbox{Read}\;a)$:] In any state, there is only one (correct) response
\[
\begin{array}[t]{l}
R(s,\mbox{Read}\;a) = \SET{\overline{\mbox{Read}}\;v \;x}{v : V, x : s(a) \sim_V v} \\
s[\mbox{Read}\;a/\BLANK] = s
\end{array}
\]
\item[responses to $(\mbox{Write}\;a\;v)$:] There is only one reply:
\[
\begin{array}{l}
R(s,\mbox{Write}\;a\;v) = \SING{ \overline{\mbox{Write}}} \\
s[\mbox{Write}\; a \; v/\BLANK] = (a := v) s
\end{array}
\]
Here $a:=v$ stands for the operation which overwrites a memory-state (whose domain must be
equipped with a decidable equality $\sim_A$) with a new value $v$ at a given
argument $a$.
\end{description}
\subsection{Atomic array of memory cells} Next we consider an array of memory cells that
can be simultaneously (atomically) read and written. For example, we
can write a cell and return its immediately prior value.
Let $I$ be a family (a ring) of decidable subsets of $A$, closed under
finite unions and relative complement (for example finite sets of
disjoint intervals in a linear order). We intend that if a set of addresses is
in $I$, the corresponding locations may be accessed simultaneously.
%If $w : I$, let $[w -> V]$ be the set of functions into $V$ with
%domain $w$. (So $ [w -> V] == \prod{s : S} s |= w -> V$.) We use
%$\sim_V$ also to denote equality between partial functions. Let $-+->
%V = \bigcup_{w : I}[w -> V]$ be the set of partial functions into $V$
%with domain in $I$. We write $f = [ i : \dom{f} |-> f(a) ] : (-+->
%V)$. I also use $ -+-> f$ for the (covariant) operation of mapping a
%partial function onwards with $f$ : more precisely, $( -+-> f) p == [
%i : \dom{p} |-> f(p(a)) ] = \Fam{f}(p)$.
\begin{description}
\item [states:] $s : A -> V$.
\item[commands:] The set of commands is the same in every state\[
C(s) = \SET{\mbox{RW}\;w\;p}{w : I, p : A -+-> V}
\]
\item[responses to $(\mbox{RW}\;w\;p)$:]
\[
\begin{array}[t]{l}
R(s,\mbox{RW}\;w\;p) = \SET{\overline{\mbox{RW}}\;p' \;x}
{ p' : A -+-> V,
x : \dom{p'} = w
\logand \rest{s}{w} \sim_V p'} \\
s[\mbox{RW}\;w\;p/\BLANK] = s \overwrite p
\end{array}
\]
\end{description}
\subsection{Unix memory array}
This example is an abstraction of the unix system calls `read' and
`write' to read and write portions of files. These are interruptible,
and so may terminate `incompletely' or abnormally, having read or
written only part of what was asked for. Several cells can (with
luck) be simultaneously read, or simultaneously written. If they are
incomplete, writes ``trash'' the area of uncertainty, so that we are
not even allowed to ask to read a trashed cell until it has been
satisfactorily re-written. (Unix does not have such an extreme notion
of `trashing': trashed cells may safely be read, though the values are constrained
only to be sequences of bytes.)
\begin{description}
\item [states:] $A -+-> V$
%\SING{ \mbox{Unknown} } \cup \SET
% {\mbox{Known}\;v}{v : V}$.
%(This is a possible implementation of
% partial functions.)
\item[commands:] The set of commands is the same in every state\[
C(s) = \SET{\mbox{Read}\;w}{w : I} \cup \SET{\mbox{Write}\;p}{ p : A -+-> V}
\]
\item[responses to $(\mbox{Read}\;w)$:]
\[
\begin{array}[t]{l}
R(s,\mbox{Read}\;w) = \SET{\overline{\mbox{Read}}\;p \;x}{p : A -+-> V,
x : \dom{p} \subseteq w \logand (\rest{s}{\dom{p}}) \sim p} \\ % (-+-> \mbox{Known}) \; p} \\
s[\mbox{Read}\;w/\BLANK] = s
\end{array}
\]
Note: unknown cells must be written before being read.
\item[responses to $(\mbox{Write}\;p)$:]
\[
\begin{array}[t]{l}
R(s,\mbox{Write}\;p) = \SET{\overline{\mbox{Write}}\;w}{w \subseteq \dom{p}} \\
s[\mbox{Write}\;p/\overline{\mbox{Write}}\;w] =
\begin{array}[t]{l}
(\rest{s}{\dom{s} \setminus \dom{p}}) \overwrite (\rest{p}{w})
% (s \overwrite
% [ a : \dom{p} |-> \mbox{Unknown}]) \\
% \overwrite
% [ a : w |-> \mbox{Known}\; p(a)]
%
% (s \overwrite [ a : w |-> \mbox{Known}\; p(a)]) \\
% \overwrite [ a : \dom{p} \setminus w |-> \mbox{Unknown} ]
\end{array}
\end{array}
\]
Note that it is perfectly legitimate for an implementation (the demon)
to trash any write request. To rule this out, we might give a
fairness guarantee, \eg{} that persistent requests to write any
specific cell will eventually succeed.
\end{description}
\section{Loose ends }
\label{sec:concl-summ-assessm}
\paragraph{Saturation and hiding}
Undiscussed: the point of passing to the saturation.
Finitely many `silent' interactions may occur in the lower-level
interface, and they are in a sense `hidden' in a single response. We
want to allow this kind of `silencing'.
\paragraph{Saturation and universal properties}
Connected with simulation: since the equality induced with
$\rtc{\BLANK}$ is coarser than extensional equality, we should have
more universal objects, more `uniqueness' modulo saturation.
Unresolved: is $\SKIP{}$ initial? Weakly initial? There may be many
simulations, but are any distinguished in some way? What is the appropriate
notion of universality in an enriched category? Generally, what limits and colimits
do we have? We have a supremum operation for saturated simulations.
Pierre Hyvernat has established that predicate transformers and
simulations provide a model for full linear logic, in which a linear
formula is interpreted by a predicate transformer, and a proof of it
by a post-fixed point. The linear implication is in effect the
relation transformer whose post-fixed points are simulation relations.
Investigations are continuing.
\paragraph{Continuity of $\SEQ$?}
Unresolved: we know that simulations are closed under sequential
composition, which is monotone in both arguments (with respect to
extensional inclusion).
What is known about sequential composition and the lattice structure
appears to be this:
\[
\begin{array}{ll}
(\inf{i : I} \Phi_i ) \SEQ \Psi = \inf{i : I} (\Phi_i \SEQ \Psi ) \\
(\sup{i : I} \Phi_i ) \SEQ \Psi = \sup{i : I} (\Phi_i \SEQ \Psi ) \\
\Phi \SEQ (\inf{i : I} \Psi_i ) \sqsubseteq \inf{i : I} (\Phi \SEQ \Psi_i) \\
\Phi \SEQ (\sup{i : I} \Psi_i ) \sqsupseteq \sup{i : I} (\Phi \SEQ \Psi_i) \\
%
% \Phi \SEQ (\sqcap_{i : I} \Psi_i ) = \sqcap_{i : I} (\Phi \SEQ \Psi_i) &\CC{$\Phi$ conjunctive, I non-empty}\\
% \CC{ditto} &\CC{$\Phi$ conjunctive, total}\\
% \Phi \SEQ (\sqcup_{i : I} \Psi_i ) = \sqcup_{i : I} (\Phi \SEQ \Psi_i) &\CC{$\Psi$ disjunctive, I non-empty}\\
% \CC{ditto} &\CC{ disjunctive, strict}\\
\end{array}
\]
The last two inequations (which are just expressions of monotonicity)
can be strengthened to equalities for certain classes of $\Phi$.
But does sequential composition, perhaps, commute (in the right-hand
argument) with suprema in the saturation order? Suprema in the
saturation order are not unions, but closures of unions.
In general the role of saturated simulations needs to be clarified.
It may have something to do with continuity: commuting with suprema
of directed families.
According to Back and von Wright sequential composition preserves
continuity in both arguments.
\paragraph{Wipe out all differences below $\SKIP$}
The topic here is (perhaps) an embarrassment. Where $\ABORT$ is the
predicate transformer that maps all predicates to the empty predicate
(and $\MAGIC$ is the predicate transformer that maps all predicates to
the trivial predicate) we have that $\ABORT$ is isomorphic to $\SKIP$.
(This is because they have the same reflexive and transitive closure,
namely $\SKIP$.) We abstract from the difference, because as an
import interface, both $\ABORT$ and $\SKIP$ are equally useful
(\ie{} entirely useless).
\paragraph{Division}
Unresolved: is there a division operator for interaction structures analogous
to that for relations?
\paragraph{Inversion}
Undiscussed: inversion - this concerns switching imported and exported
interfaces. Servers and clients exchange roles. This involves
$\angel{\BLANK}$ and $\demon{\BLANK}$. There is certainly an operator
$\invPT{\Phi} : A -> \FamFam{B}$ for $\Phi : A -> \FamFam{B}$ (note:
there is no contravariance, in contrast with relational inversion)
such that $\demon{\Phi} = \angel{\invPT{\Phi}}$.
\[
\begin{array}{ll}
\invPT{C}(s) &== \prod{c : C(s)}R(s,c) \\
\invPT{R}(s,f) &== C(s) \\
s[f/c\invPT{]} &== s[c/f(c)]
\end{array}
\]
This inversion does not seem to be an involution. The result of iterating
it twice is:
\[
\begin{array}{ll}
\invPT{\invPT{C}}(s) &== \invPT{C}(s) -> C(s) \\
\invPT{\invPT{R}}(s,\BLANK) &== \invPT{C}(s) \\
s[F/f\invPT{\invPT{]}} &== s[F(f)/f(F(f))]
\end{array}
\]
There is a more subtle kind of inversion,
which uses a kind of 4-way handshake to invert the direction of
commands and responses. This is found in practice.
The situation is that any interaction map $\Phi : A -> \FamFam{B}$ can
be expressed in the form $\angelU{\phi}; \demonU{\psi}$ where $\phi :
A -> \Fam{A'}$, $\psi : A' -> \Fam{B}$.
\[
\begin{array}{ll}
A' &== \sum{s : S}C(s) \\
T_\phi(s) &== C(s) \\
s[c] &== < s, c> \\
T_\psi < s, c> &== R(s,c) \\
< s, c>[r] &== s[c/r]
\end{array}
\]
We have $\invPT{\Phi} = \demonU{\phi} ; \angelU{\psi}$.
The 4-way inversion opens this up, forming $\angelU{\psi} \SEQ \demonU{\phi}$ with some
suitable initialisation and finalisation. (A proper description is lacking.)
\paragraph{Fish and foul}
Undiscussed: there are actually two dual closure operators (or 3 dual pairs if we
consider analogues of $\tc{\BLANK}$ and $\opt{\BLANK}$):
\[
\begin{array}{ll}
\rtc{\Phi} = (\mu \Psi : S -> \FamFam{S})\; \SKIP \sqcup (\Phi \SEQ \Psi) \\
\rti{\Phi} = (\nu \Psi : S -> \FamFam{S})\; \SKIP \sqcap (\invPT{\Phi} \SEQ \Psi) \\
\end{array}
\]
The operator $\rti{\BLANK}$\footnote{The $\nu$ binding needs a general analysis of greatest
fixed points and corecursion. } is directly related to Giovanni's
`fish' relation $\ltimes$.
\[ \begin{tabular}{lll}
\underline{G-notation} & & \underline{H-notation} \\
$s \ltimes_\Phi U$ & means & $s |= \rti{\Phi}(U)$. \\
$s <|_\Phi U$ & means & $s |= \rtc{\Phi}(U)$. \\
$s |=_\Phi U$ & means & $s |= \Phi(U)$.
\end{tabular}
\]
We have
\[
\begin{array}{ll}
\rtc{\Phi}(P : \Pow{S}) &= \bigcap \SET{ Q : \Pow{S}}{\ P \cup \Phi (Q) \subseteq Q} \\
\rti{\Phi}(P : \Pow{S}) &= \bigcup \SET{ Q : \Pow{S}}{\ Q \subseteq P \cap \invPT{\Phi}(Q)}
\end{array}
\]
Here we have an ambiguity in the adjective `complete' in `complete
lattice'. We mean: complete for set indexed families -- not
necessarily for predicates. The expressions above use a second order
predicate (a quantifier) in the body of the comprehension term to pick
out the sets in the intersection or union. By allowing ourselves the
operator $\rtc{\BLANK}$, we commit ourselves to saying that the second
order property (being a pre-fixedpoint of a certain operator, or a
post-fixedpoint of another) can be in some sense set-indexed.
By appeal to transfinite induction, we turn the impredicative
intersection into a union of a transfinitely indexed sequence of
progressively weaker predicates. One can think of the indexing
elements as predecessors of a certain ordinal (the closure ordinal),
and hence of the indexing as having for its domain a genuine set.
By allowing ourselves the operator $\rti{\BLANK}$, we seem to turn
the impredicative union into an intersection of an $\omega$-sequence
of progressively stronger predicates. The contrast between a transfinitely
ordered union in the inductive case, and the $\omega$-ordered intersection
in the coinductive case is very striking.
\paragraph{win and sin}
The two operators fish $(|>< \BLANK)$ (aka $\rti{\BLANK}$) and (what
else?) fowl $(<| \BLANK)$ (aka $\BLANK^\ast$) appear to be closely
connected with the predicate transformers \win{}, and \sin{} introduced by Lamport\cite{lamport90}
which give the weakest respectively strongest invariant\footnote{ One
can take an invariant to be a predicate which is true initially and
stable (\ie{} preserved under interaction).} stronger than
respectively weaker than a given predicate. These predicate
transformers have been used to analyse concurrent algorithms such as the
`bakery' algorithm that do not require a given grain of atomicity for
access to shared variables. The exact relationships need to be
clarified.
Lamport's $\win{}$ and $\sin{}$ operators
%\cite{lamport90}
take relations to predicate transformers.
For fixed relation $\phi$, we have the following.
\begin{eqnarray}
\label{eq:1}
\win{}_\phi &=& \rti{\angelU{\phi}} \\
\sin{}_\phi &=& \rtc{\demonU{\phi}}
\end{eqnarray}
The operators $\rtc{\BLANK}$ and $\rti{\BLANK}$ seem to generalise Lamport's operators
from relations to predicate transformers. The novelty here is the use of inversion
in the definition of $\rti{\Phi}$.
\paragraph{Points}
Undiscussed: the counterpart of topological points. The interactive
counterpart of the notion of a point seems to be that of an inhabited
predicate $\alpha$ which is indefinitely extensible, sustainable, or
refinable in a certain sense. We can express this impredicatively,
quantifying over predicates $U$. It is a closure property on
$\alpha$.
\[\alpha \mbox{\ is inhabited, and for all $U$, }
\alpha \overlaps (<| U) -> \alpha \overlaps U
\]
Note that in formal topology, one usually has a relation $\leq$ on the
states (which must be a self-simulation\footnote{Is this true?
Palmgren has the extra properties in the form $(<| U) \cap (<| V)
\subseteq U_\leq \cap V_\leq$ (where $U_\leq$ is the $\leq$-downward
closure of $U$), and $(\leq b) \subseteq (<| \SING{b})$.} with
respect to the interaction structure), and a point is further required
to be a filter with respect to this relation. For example any
inhabited predicate of the form $(|>< V)$ (a closed set according to
Giovanni, formally an open set) has this closure property.
Such an $\alpha$ ought (perhaps) to be (roughly) the same thing as a
simulation of the interface by $\SKIP$ (or equivalently, $\ABORT$). A
bit tendentiously, we call a simulation of an interface by $\SKIP$ an
\emph{implementation} of the interface. This amounts to a predicate
$\alpha$ over the interface states which satisfies the above closure
property.
The proof that $\alpha$ is inhabited is the `active ingredient'. It
can be used as a deadlock free or non-stop server program (a perpetuum
mobile).
\paragraph{Specifications, client-server contracts}
Undiscussed: There are two kinds of specification: a server program
satisfies a specification\footnote{As it were, written on the box or packaging in
which the server program comes} of the form
\newcommand{\Init}{\mbox{Init}}
\newcommand{\Goal}{\mbox{Goal}}
\newcommand{\Inv}{\mbox{Inv}}
\[\begin{array}{ll}
\sum{s_0 : S} \Init(s_0) \wedge s_0 \ltimes \Inv{} \\
\mbox{\ie{}}\;\;\;\; \Init \overlaps (\ltimes \Inv{})
\;\;\;\;\mbox{written}\;\;\;\; \Init \ltimes \Inv{}
\end{array}
\]
whereas a client program (which terminates accomplishing a goal
predicate -- an atomic transaction) satisfies a
specification\footnote{Which tells you when you can use it, and what
you can use it to bring about} of the form
\[
\begin{array}{ll}
\prod{s : S}\Init(s) ->
s <| \Goal{} \\
\mbox{\ie}\;\;\;\; \Init \subseteq (\lhd \Goal{}) \;\;\;\;\mbox{written} \;\;\;\;
\Init \lhd \Goal{}
\end{array}
\]
The logic of the interaction between clients and servers is contained
in the following rule.
\centerline{
\infer{\Init{} \lhd \Goal{}
& \Init{} \ltimes \Inv{}}
{\Goal{} \ltimes \Inv{}}
{\mbox{\small Giovanni will have a name: compatibility?}}
}
A proof of the left hand premise is a program for a client. A proof of the
right hand premise is a program for a server. The proof that we get of the
conclusion (having run the client program against the server program,
or having `made the inference') is a new server program, after the
goal of the client program has been accomplished. (It has the same
invariant, but a new initial predicate equal to the client's goal.)
(The client program is all used up.)
%\section*{Bibliography}
\bibliographystyle{plain}
\bibliography{logic}
%\end{document}
%\documentclass[a4paper]{article}
%\usepackage{amssymb}
%\usepackage{mathlig}
%\input rules.tex
%% "English"
%\newcommand{\iff}{\mbox{\it iff}}
%%%>>>1
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\title{refinement calculus\\notes}
%\author{Peter G. Hancock}
%\begin{document}
%\date{1 Oct 2003}
%\maketitle
%\tableofcontents
%% Sambinic notation
%%
\section{syntax}
The usual setting for
Back and von Wright's refinement calculus
is higher order classical logic, with quantification over predicate variables, and a
complement operator.
Here instead we define certain of these constructions in predicative type theory in
which quantification over predicates is not allowed in propositions.
The basic judgements in which we are interested are (primarily) $U
\subseteq V$ and (secondarily, the dual judgement) $U \overlaps V$
which says that $U$ and $V$ are compatible. It is necessary to have a
separate judgement form such as $U \overlaps V$ in the absence of the
complement operator.
%
Here $U$ and $V$ are predicate expressions that may contain variables
of various sorts: states, state-predicates, and relations. The variables are
implicitly universally quantified. If the predicate expressions
depend on a single predicate variable $X$, we have a (pointwise) relation
between predicate transformers. If the predicate expressions depend
on a state variable $s$, we have the inclusion relation between
relations.
%
In combination with predicate transformers
$\Phi$ and iterated form, the basic relations give rise to relations
of interest in client-server programming such as the following
\[
\begin{array}{l}
U <|_\Phi V = U \subseteq \rtc{\Phi}(V)\\
U |><_\Phi V = U \overlaps \ctr{\Phi}(V)\\
%U |=_\Phi V = U \subseteq \Phi(V)
\end{array}
\]
As for predicativity, all reasoning should be essentially
`point-free', or algebraic. That is to say, proofs should be
algebraic manipulations in which free subset variables never officially
appear.
In the general case we may have beside predicates also relations with
various arities, and beside unary transformers of unary predicates also
transformers with arity of the form $< n_1, \ldots , n_k > -> n$.
Two base types (for the two kinds of variables):
$S$ (states -- $s$, $s'$, $s_1$, \ldots) %
and $P$ (predicates -- $P$). %
We have one binary relation $s |= P$, meaning that state $s$ satisfies
predicate $P$. This gives rise to 3 kinds of statement:
\[
\begin{array}{ll}
s |= U & \\
s |= Q(s') & \mbox{$Q$ a relation} \\
s |= F(U) & \mbox{$F$ a predicate transformer}
\end{array}
\]
The following higher types:
\[
\begin{array}[t]{lcll}
f,g, \ldots &: S \to S & \C{state transformer } & f = g \\
R,Q, \ldots &: S \to P & \C{state relations} & R \subseteq Q\\
F,G, \ldots &: P \to P & \C{predicate transformers} & F \sqsubseteq G
\end{array}
\]
The first kind of comparison is equality between state expressions that
may have free state-variables. Might want apartness.
The second kind of comparison is equivalence and implication between statements that may have
free state-variables. Might want overlap.
The third kind of comparison is equivalence and implication between statements that have
free occurrences of both state-variables and predicate-variables. Again, might want
overlap.
\section{predicates and families}
\label{sec:predicates}
Predicates over a set form a distributive lattice: closed under sup
(empty, binary, set-indexed) and inf (empty, binary, set-indexed). By
distributivity I mean that binary sups distribute over binary infs and
vice-versa. We also have implication, forms of relative complement etc.
Binary infs distribute over arbitrary sups. We also have singleton predicates.
Families on the other hand are merely a sup-lattice with singletons.
An obscure point to be explained: the link between families and
predicates (or transition structures and relations) involves not just
equality (which gets us one way), but also existential quantification
over states, which gets us a family from a predicate, in which the
function is first projection.
There is a question of size here: $S$ may be `large' compared to the
universe of sets we are working in.
\newpage
\section{grammar}
\begin{center}
\begin{tabular}[t]{ll@{\ensuremath{\;\;:\!:\!= \;\;}}l}
\rule{0em}{1em}
\textit{states} &
$s, s'$ & $f (s)$ \\
\rule{0em}{2em}
\textit{state transformers} &
$f, g$ & $ \begin{array}[t]{l}
id
\; | \; f\cdot g
\end{array}
$ \\
\rule{0em}{2em}
\textit{families} &
$\alpha, \beta$ & $ \begin{array}[t]{l}
\SING{s}
\; | \;
\sqcup_i \alpha_i
\; | \; \phi(s)
\end{array}
$ \\
\rule{0em}{2em}
\textit{predicates} &
$U, V$ & $ \begin{array}[t]{l}
\alpha
\; | \; \bigcup_i U_i
\; | \; \bigcap_i U_i
\; | \; \Phi(U)
\; | \; R(s)
\end{array}
$ \\
\rule{0em}{2em}
\textit{transition structures} &
$\phi,\psi$ & $ \begin{array}[t]{l}
\graph{f} \\
\; | \; \guard{U}{\phi} \\
\; | \; \sqcup_i \phi_i \\
\; | \; \phi \SEQ \psi
\; | \; \SKIP \\
\; | \; \rtc{\phi}
\; | \; \tc{\phi}
\; | \; \opt{\phi} \\
\end{array}
$ \\
\rule{0em}{2em}
\textit{relations} &
$Q,R$ & $\begin{array}[t]{l}
\phi \\
\; | \; \guard{U}{Q} \\
\; | \; \bigcup_i Q_i
\; | \; \bigcap_i Q_i \\
\; | \; R \SEQ Q
\; | \; \SKIP
\; | \; Q / R \\
\; | \; \phi \SEQ Q
\; | \; Q / \phi \\
\; | \; \Phi \cdot Q \\
\; | \; \rtc{Q}
\; | \; \tc{Q}
\; | \; \opt{Q} \\
\; | \; \reverse{Q}
\end{array}
$ \\
\rule{0em}{2em}
\textit{interaction structures} &
$\Phi,\Psi$ & $ \begin{array}[t]{l}
\assert{\phi}
\; | \; \assume{\phi} \\
\; | \; \sqcup_i \Phi_i
\; | \; \sqcap_i \Phi_i \\
\; | \; \Phi \SEQ \Psi
\; | \; \SKIP \\
\; | \; \assign{f} \\
\; | \; \rtc{\Phi}
\; | \; \tc{\Phi}
\; | \; \opt{\Phi} \\
\; | \; \ctr{\Phi} \\
\; | \; \invpt{\Phi}
\end{array}
$ \\
\rule{0em}{2em}
\textit{predicate transformers} &
$F,G$ & $ \begin{array}[t]{l}
\Phi \\
\; | \; \assert{R}
\; | \; \assume{R} \\
\; | \; \sqcup_i F_i
\; | \; \sqcap_i F_i \\
\; | \; F \SEQ G
\; | \; \SKIP \\
\; | \; \assign{f} \\
\; | \; \rtc{F}
\; | \; \tc{F}
\; | \; \opt{F} \\
\; | \; \ctr{F}
\end{array}
$
\end{tabular}
\end{center}
\newpage
\section{types and definitions}
\subsection{State transformers}
%\renewcommand{\arraystretch}{5}
\begin{enumerate}
\item
{composition of state transformers}
\infer{ f : A -> B \hspace{1cm} g : B -> C}{
g \cdot f : A -> C
}{}
$(g \cdot f)(s) == g(f(s))$.
\item
{identity of state transformers}:
\hspace{1cm} $ % \[ % \infer{}{
\SKIP : A -> A
$ % \] % }{}
$\SKIP(a) == a$.
\item base state transformers
%\subsection{assignment}
%\label{sec:assignment}
Often, the state space is a product $S = \Pi_{ v : V}S_v$, where $V$
is a set of variable-names (with decidable equality) and $S_v$ is a
factor of the state space corresponding to variable $v$: the type of
$v$. Then a state is a function which assigns to a variable-name
$v$ a value of the type $S_v$ appropriate to $v$. This can also be
thought of as a record with field-names or coordinates $V$, with
field $v : V$ having type $S_v$.
If $e$ is an expression built up from variable-names using function
constants, we define by recursion on its build up the value $|e|_s$
of $e$ in the start state $s$ -- in the obvious way (see below). We
then define the update function $(v := e) : S -> S$.
\[
\begin{array}[t]{l}
(v := e) : S -> S \\
(v := e)(s) == \FUN{v' : V} %s', \WHERE{ s'(v') ==
\IF{ v' = v }
{ |e|_s }
{ s(v') }
\end{array}
\]
Thus an \emph{assignment} statement can be interpreted as a state
transformer.
We may obviously extend the definition to \emph{simultaneous}
assignment, where we have a finite partial map from variable
names to expressions. We can represent this in the usual way
with a vector $\vec{v}$ of distinct
variables, and a vector $\vec{e}$ the same length, giving for
each variable the corresponding expression.
\[
x_1, \ldots,x_n := e_1, \ldots,e_n
\]
\paragraph{Example}: $x,y := y, x$ -- atomically swap contents of variables $x$ and $y$.
The syntax of an assignment statement is essentially a function from names of
assignable variables to expressions built over those and other
`read-only' variables. The semantics of an assignment statement is
a state-transformer.
To define the semantics of an assignment statement, all that seems to be necessary is
that the domain of the syntax-function
should be decidable.
Tangentially, if we are interested in (statically) \emph{typed} variables, then one
can represent the type system in the form of an interaction structure
-- a set of sorts or types, and for each sort $\sigma$ a family of
families of sorts:
\[
\SET{
\SET{
\sigma[c/r]
}{
r : R(\sigma,c)
}
}{
c: C(\sigma)
}
\]
Here $C(\sigma)$ describes the constructors that can be used to form
an expression of a given sort $\sigma$, and for each such constructor
$c : C(\sigma)$ an element $r$ of $R(\sigma,c)$ selects the location
of an immediate subexpression. The sort of the subexpression must
equal $\sigma[c/r]$.
An expression of sort $\sigma$ is now a well-founded tree, in which
there are `leaves' labelled by state variables. (If there are none,
the expression is closed -- the tree has finished growing.)
We might use some notation such as $\sigma <|
\gamma$ for the set of expressions which have sort $\sigma$ with
respect to type assignment $\gamma : V -> S$.
We can define the value of an expression $e : \sigma <| \gamma$ by
wellfounded recursion on the structure of $e$. (Assumes a meaning
is given to the constructors.)
An assignment statement is well-typed with respect to a
type-assignment $\gamma$ if the sort assigned to
a variable on the left hand side of the `$:=$' equals the sort
assigned to the corresponding expression on the right.
We can now define the state transformer corresponding to a well-typed
assignment statement.
%\item Because state transformers share the same domain and codomain, they can be iterated: $f^n$ where
%$n$ is a natural number.
\end{enumerate}
%
\newpage
\subsection{Category of relations and simulations}
Predicates have one parameter -- what the predicate is about.
Similarly families have one parameter -- the index of the general
term. By considering a further state-parameter, we pass from
predicates to relations, and from families to transition structures.
We thereby add to the lattice operations a `sequencing' monoid. The
unit of sequential composition is the identity relation, or in other
words the graph of the identity function.
Survey:
Closure properties. Other notes.
\begin{itemize}
\item graphs of functions are transition structures.
\item transition structures are closed under restriction by predicates (guarding).
\item transition structures are closed under sup.
\item transition structures are closed under composition, eq \etc{}.
\item in the homogeneous case, we have the usual closure operators (reflexive, transitive, etc).
\item transition functions are \emph{not} closed under converse, nor
intersection, nor division. (That is, without specific use of the
equality relation.)
\end{itemize}
Transition structures are `regular' -- form a sup-lattice (with
set-indexed sups), and have an iteration star operation. (We get back
the infs with interaction structures, and two notions of iteration.)
Transition structures form a Kleene (regular) algebra in the following
sense. It has an associative and commutative binary sup $\cup$ with
unit $0$ (the empty transition structure); associative binary
sequencing with unit \SKIP, distributing over sup. 0 is absorbing. An
iteration operator star, satisfying $\rtc{\phi}$ is a solution of the
equations $\SKIP \cup (\phi \SEQ x) \subseteq x$ and $\SKIP \cup (x
\SEQ \phi) \subseteq x$; and if $\phi \SEQ x \subseteq x$ or $x \SEQ
\phi \subseteq x$, then $\rtc{\phi} \SEQ x \subseteq x$ or $x \SEQ
\rtc{\phi} \subseteq x$ respectively. Transition structures also
include `tests' somewhat in Kozen's sense, except that they needn't
form a Boolean algebra (but a Heyting algebra).
Each transition structure determines two relation transformers $(\phi \SEQ)$ and $(/ \phi)$. The
definition of these does not use equality. These are closely related to
the predicate transformers $\angelU{\phi}$ and $\demonU{\phi}$.
We have $(\phi \SEQ) = (\inv{}) \cdot (\assert{\phi} \cdot) \cdot (\inv{})$
and $(/ \phi) = (\assume{\phi} \cdot)$.
\subsubsection[ObjRel]{Relations and transition structures}
\label{sec:relations-as-objects}
\begin{enumerate}
\item
{\ST's as \REL's}
\infer{f : A -> B}{\graph{f} : A -> \Pow{B}}{}
\begin{eqnarray}
\label{eq:7}
b |= (\graph{f}) a == b = f(a)
\end{eqnarray}
\item
{\ST's as \TS's}
\infer{f : A -> B}{\graph{f} : A -> \Fam{B}}{}
\begin{eqnarray}
T(a) == N_1 &;& a[\BLANK] == f(a)
\end{eqnarray}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\item
{predicates as \REL's}
\infer{U : \Pow{A}}{\plift{U} : A -> \Pow{A}}{}
\begin{eqnarray}
\label{eq:7}
b |= (\plift{U}) a == a |= U \wedge b = a
\end{eqnarray}
\item
{predicates as \TS's}
\infer{U : \Pow{A}}{\plift{U} : A -> \Fam{A}}{}
\begin{eqnarray}
T == U &;& s[\BLANK] == s
\end{eqnarray}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\item
{domain restriction of \REL's}
\infer{Q : A -> \Pow{B} \hspace{1cm} U : \Pow{A}}
{\guard{U}{Q} : A -> \Pow{B}}
{}
\begin{eqnarray}
\label{eq:41}
b |= (\guard{U}{Q}) a == a |= U \wedge b |= Q(a)
\end{eqnarray}
Note: some redundancy. $\guard{U}{R} = \plift{U} \SEQ R$.
\item
{domain restriction of \TS's}
\infer{\phi : A -> \Fam{B} \hspace{1cm} U : \Pow{A}}
{\guard{U}{\phi} : A -> \Fam{B}}
{}
\begin{eqnarray}
T == U \cap T_\phi &;& s[<\BLANK,t>] == s[t]_\phi
\end{eqnarray}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\item
{mapping \REL's by a st}
\infer{R : A -> \Pow{B} \hspace{1cm} f : C -> B
}{ \Pow{f} \cdot R : A -> \Pow{C}
}{}
\begin{eqnarray}
\label{eq:4}
c |= (\Pow{f} \cdot R) a == f(c) |= R(a)
\end{eqnarray}
\item
{mapping \TS's by a st}
\infer{\phi : A -> \Fam{B} \hspace{1cm} f : B -> C
}{ \Fam{f} \cdot \phi : A -> \Fam{C}
}{}
\begin{eqnarray}
T(a) == T_\phi(a) &;& a[t] == f(a[t]_\phi)
\end{eqnarray}
Note: redundant. $\Fam{f} \cdot \phi = \phi \SEQ \graph{f}$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\item
{union, \emph{and} intersection of \REL's}
\begin{center}
\infer{Q_i : A -> \Pow{B}}{(\cup_i Q_i), (\cap_i Q_i) : A -> \Pow{B}}{}
\end{center}
\begin{eqnarray}
\label{eq:5}
(\cap_i Q_i)(a) == \cap_i (Q_i(a)) \\
(\cup_i Q_i)(a) == \cup_i (Q_i(a))
\end{eqnarray}
Note, no counterpart to intersection on \TS's.
\item
{union of \TS's}
\infer{\phi_i : A -> \Fam{B}}{(\sqcup_i \phi_i) : A -> \Fam{B}}{}
\begin{eqnarray}
T(a) == \sum_{i} T_{\phi_i}(a) &;& a[<i,t>] == a[t]_{\phi_i}
\end{eqnarray}
Note, no counterpart of intersection.
\item
{argument swapping}
\begin{center}
\infer{ Q : A -> \Pow{B}}
{\reverse{Q} : B -> \Pow{A}}{}
\end{center}
\begin{eqnarray}
\label{eq:1}
a |= \reverse{Q}(b) == b |= Q(a)
\end{eqnarray}
Notes
\begin{itemize}
\item no counterpart operation on \TS's
\item prime example of a relation transformer. Contravariant functor
on the category of sets and relations. An involution. Called
converse, inverse, reverse, interchange, twist, flip, swap, and
so on.
\item determines a notion of $(\reverse{})$-duality for relation transformers. The
$(\reverse{})$-dual of a relation transformer $\Phi$ is $(\reverse{}) \cdot \Phi \cdot (\reverse{})$.
For example, the division $(Q \setminus)$ is $(\reverse{})$-dual to $(/ (\reverse{Q}))$.
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\item
{\REL's closed under sequential composition}
\begin{center}
\infer{
R : A -> \Pow{B} \hspace{1cm}
Q : B -> \Pow{C}}
{(R \SEQ Q) : A -> \Pow{C}}{}
\end{center}
\begin{eqnarray}
\label{eq:2}
c |= (R \SEQ Q)(a) == R(a) \overlaps \reverse{Q}(c)
\end{eqnarray}
\item
{identity \REL{}}: \hspace{1cm}
$ %\[ %\infer
{}
{\SKIP = \graph{\SKIP} : A -> \Pow{A}}
{}
$ % \]
\begin{eqnarray}
\label{eq:21}
a |= \SKIP(a') == a = a'
\end{eqnarray}
\item
{\TS's closed under sequential composition}
\begin{center}
\infer{
\phi : A -> \Fam{B} \hspace{1cm}
\psi : B -> \Fam{C}}
{(\phi \SEQ \psi) : A -> \Fam{C}}{}
\end{center}
\begin{eqnarray}
T(a) == \sum_{t_1 : T_{\phi}(a)} T_\psi(a[t_1]_\phi) &;& a[<t_1,t_2>] == (a[t_1]_\phi)[t_2]_\psi
\end{eqnarray}
\item
{identity \TS}: \hspace{1cm}
$ % \[ % \infer{}
{\SKIP = \graph{\SKIP} : A -> \Fam{A}}
$ % \] % {}
\begin{eqnarray}
T(\BLANK) == N_1 &;& a[\BLANK] == a
\end{eqnarray}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\item
{closures of \REL's}
\infer{R : A -> \Pow{A}}
{\opt{R}, \tc{R}, \rtc{R} : A -> \Pow{A}}
{}
\begin{eqnarray}
\label{eq:8}
\rtc{R} &==&
\cap \SET{T : A -> \Pow{A}}{(\SKIP \cup (R \SEQ T)) \subseteq T} \\
&=& \cup \SET{(R \SEQ)^n \SKIP}{n = 0, 1, \ldots}
\nonumber
\end{eqnarray}
$\opt{R} == R \cup \SKIP$, \hspace{1cm} $\tc{R} == R \SEQ \rtc{R}$.
\item
{closures of \TS's}
\infer{ \phi : A -> \Fam{A}}
{\rtc{\phi}, \opt{\phi}, \tc{\phi} : A -> \Fam{A}}
{}
$\rtc{\phi}$: \hspace{1cm}
\begin{eqnarray}
\label{eq:6}
T &==&
\begin{array}[t]{l}
(\mu\;X : A -> \Set)\\
(\forall \; a : A) \\
\begin{array}{r@{}l}
& \SING{\mbox{nil}} \\
\cup & \SET{\mbox{cons}(t_0, t') }{ t_0 : T_\phi(a)
, t' : X(a[t_0]_\phi)} \\
& \subseteq X(a)
\end{array}
\end{array}
\\
a[\mbox{nil}] &==& a
\nonumber \\
a[\mbox{cons}( t_0,t' ) ] &==& (a[t_0]_\phi)[t']
\nonumber
\end{eqnarray}
$\opt{\phi} == \phi \sqcup \SKIP$, \hspace{1cm}
$\tc{\phi} == \phi \SEQ \rtc{\phi}$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\item
{\REL's closed under post-division}
\begin{center}
\infer{
Q : A -> \Pow{B} \hspace{1cm}
R : C -> \Pow{B}
}{
(Q / R) : A -> \Pow{C}
}{}
\end{center}
\begin{eqnarray}
\label{eq:3}
c |= (Q / R)(a) == R(c) \subseteq Q(a)
\end{eqnarray}
\item
{pre-composition of \TS's to \REL's}
\begin{center}
\infer{
\phi : A -> \Fam{B} \hspace{1cm}
Q : B -> \Pow{C}
}{
(\phi \SEQ Q) : A -> \Pow{C}
}{}
\end{center}
\begin{eqnarray}
c |= (\phi \SEQ Q)(a) == \phi(a) \overlaps \reverse{Q}(c)
\end{eqnarray}
$ (\phi \SEQ Q)(a) = \cup \SET{Q(a[t]_\phi)}{t : T_\phi(a)} $
Note that this lets us lift a \TS{} $\phi$ to the \REL{} $(\phi \SEQ \SKIP)$.
\item
{post-division of \REL's by \TS's}
\begin{center}
\infer{
Q : A -> \Pow{B} \hspace{1cm}
\psi : C -> \Fam{B}
}{
(Q / \psi) : A -> \Pow{C}
}{}
\end{center}
\begin{eqnarray}
c |= (Q/\psi)(a) == \psi(c) \subseteq Q(a)
\end{eqnarray}
Note that this gives us a `reciprocal' lift of \TS{} $\phi$ to \REL{} $(\SKIP / \phi)$.
The reciprocal of a relation is completely different from its converse. For what
relations are the converse and reciprocal the same?
\item
{\PT's as operations on \REL's}
\infer{
\Phi : \Pow{A} -> \Pow{B} \hspace{1cm}
Q : C -> \Pow{A}
}{
(\Phi \cdot Q) : C -> \Pow{B}
}{}
\begin{eqnarray}
b |= (\Phi \cdot R)(c) == b |= \Phi (R(c))
\end{eqnarray}
\end{enumerate}
\subsubsection[MorphRel]{Morphisms between relations and transition structures}
\label{sec:morph-betw-relat}
Consider the Kleisli category for the monadic functor $\Fam{\BLANK}$. The
arrows in this category (diagrams of shape $A -> \Fam{B}$, which we picture as
vertical arrows) are called
transition structures. A morphism between two such arrows $\phi : A -> \Fam{B}$
and $\psi : C -> \Fam{D}$ is
a pair of horizontal relations $Q_1 : A -> \Pow{C}$ and $Q_2 : B -> \Pow{D}$
which form a ``sub-commuting'' square:
\[
\reverse{Q_1} \SEQ \phi \subseteq \psi ; \reverse{Q_2}
\]
This is equivalent to
\[
\reverse{Q_1} \subseteq (\psi ; \reverse{Q_2} ) / \phi
\]
or even
\[
\begin{array}{rl}
Q_1 \subseteq & (\sim) \cdot (/ \phi) \cdot (\psi ;) \cdot (\sim )
\; Q_2 \\
= & (\sim) \cdot (\assume{\phi} \cdot) \cdot ((\sim) \cdot (\assert{\psi} \cdot) \cdot (\sim) )
\cdot (\sim )
\; Q_2 \\
= & (\sim) \cdot (\assume{\phi} \cdot) \cdot (\sim) \cdot (\assert{\psi} \cdot)
\; Q_2
\end{array}
\]
One composes morphisms between arrows ``horizontally'', as relations.
(Pairs of relations are compared pointwise, for inclusion and equality.)
If instead of arrows we restrict ourselves to cycles (\ie{}
homogeneous transition structures, \ie{} endomorphisms in the Kleisli
category), the appropriate notion of morphism is a simulation.
\newpage
\subsection{Category of predicate transformers and simulations}
\label{sec:pred-transf-1}
Mumble mumble.
\subsubsection[MorphPT]{Predicate transformers (and interaction structures)}
\label{sec:morph-betw-pred}
\begin{enumerate}
\item
{Relational update, lifting \REL's to \PT's.}
\begin{center}
\infer{
R : A -> \Pow{B}
}{
\assert{R}, \assume{R} : \Pow{B} -> \Pow{A}
}
{}
\end{center}
\begin{eqnarray}
a |= \assert{R}(U) &==& R(a) \overlaps U \\
a |= \assume{R}(U) &==& R(a) \subseteq U
\end{eqnarray}
\item
{angelic and demonic lifting of a \TS{} to an \IS{}}
\begin{center}
\infer{
\phi : A -> \Fam{B} \hspace{1cm}
}{
\assert{\phi}, \assume{\phi} : A -> \Fam{(\Fam{B})}
}{}
\end{center}
angel $\assert{\phi}$
\begin{eqnarray}
\label{eq:101}
C &==& T_\phi
\\
R(\BLANK,\BLANK) &==& N_1
\nonumber \\
a[t/\BLANK] &==& a[t]_\phi
\nonumber
\end{eqnarray}
demon $\assume{\phi}$
\begin{eqnarray}
\label{eq:10}
C(\BLANK) &==& N_1 \\
R(a,\BLANK) &==& T_\phi(a) \nonumber \\
a[\BLANK/t] &==& a[t]_\phi \nonumber
\end{eqnarray}
\item
{\IS's as \PT's}
\infer{
\Phi : A -> \FamFam{B}
}{
\Phi : \Pow{B} -> \Pow{A}
}{}
\begin{eqnarray}
\label{eq:11}
a |= \Phi(U) &==& {(\exists\;c : C_\Phi(a))\; \SET{a[c/r]_\Phi}{ r : R_\Phi(a,c) } \subseteq U}
\end{eqnarray}
Note: $\Phi : A -> \FamFam{B}$ can always be written $\assert{\phi}
\SEQ \assume{\psi}$ for certain $\phi : A -> A'$, $\psi : A' ->
\Fam{B}$, as follows. Write $\Phi_{\mbox{\tiny pre}}$,$\Phi_{\mbox{\tiny post}}$ for $\phi$,$\psi$ .
\[
\begin{array}[t]{rcl}
A' &=& \SET{\mbox{pending}(a,c)}{a : A, c : C_\Phi(a)}
\\
\phi(a) &=& \SET{\mbox{pending}(a,c)}{c : C_\Phi(a)}
\\
\psi(\mbox{pending}(a,c)) &=& \SET{a[c/r]_\Phi}{r : R_\Phi(a,c)}
\end{array}
\]
\newpage
\item
{infima, suprema of \PT's}
\infer{
F_i : \Pow{A} -> \Pow{B}
}{
(\sqcap_i F_i), (\sqcup_i F_i) : \Pow{A} -> \Pow{B}
} {}
\begin{eqnarray}
(\sqcap_i F_i)(U) &==& \cap_i (F_i(U)) \\
(\sqcup_i F_i)(U) &==& \cup_i (F_i(U))
\end{eqnarray}
\item
{infima, suprema of \IS's}
\infer{
\Phi_i : A -> \FamFam{B}
}{
(\sqcap_i \Phi_i), (\sqcup_i \Phi_i) : A -> \FamFam{B}
} {}
angelic $\sqcup$:
\begin{eqnarray}
C &==& \cup_i C_i \\
R(s,<i,c>) &==& R_i(s,c) \nonumber \\
s[<i,c>/r] &==& s[c/r]_i \nonumber
\end{eqnarray}
demonic $\sqcap$:
\begin{eqnarray}
C &==& \cap_i C_i \\
R(s,f) &==& (\exists\;i)\; R_i(s,f(i)) \nonumber \\
s[f/<i,r>] &==& s[f(i)/r]_i \nonumber
\end{eqnarray}
\item
{sequential composition of \PT's}
\begin{center}
\infer{
F : \Pow{A} -> \Pow{B} \hspace{1cm}
G : \Pow{B} -> \Pow{C}
}{
(F \SEQ G) : \Pow{A} -> \Pow{C}
}
{}
\end{center}
\begin{eqnarray}
(F \SEQ G)(U) &==& F (G(U))
\end{eqnarray}
\item
{sequential composition of \IS's}
\begin{center}
\infer{
\Phi : A -> \FamFam{B} \hspace{1cm}
\Psi : B -> \FamFam{C}
}{
(\Phi \SEQ \Psi) : A -> \FamFam{C}
}
{}
\end{center}
\begin{eqnarray}
C &==& \Phi(C_\Psi) \\
&=& \SET{a : A}{(\exists\;c : C_\Phi(a))(\forall\;r: R_\Phi(a,c))\;C_\Psi(a[c/r]_\Phi) }
\nonumber \\
R(a,<c,f>) &==& (\exists\;r : R_\Phi(a,c))\; R_\Psi(a[c/r]_\Phi,f(r))
\nonumber \\
a[<c,f>/<r_0,r'>] &==& (a[c/r_0]_\Phi)[f(r_0)/r']_\Psi
\nonumber
\end{eqnarray}
\newpage
\item
{identity \PT}: \hspace{1cm}
$
\SKIP = \assert{\SKIP} = \assume{\SKIP} : \Pow{A} -> \Pow{A}
$
\begin{eqnarray}
b |= \SKIP(U) &==& b |= U
\end{eqnarray}
\item
{identity \IS}: \hspace{1cm}
$
\SKIP = \assert{\SKIP} = \assume{\SKIP} : A -> \FamFam{A}
$
\begin{eqnarray}
C(\BLANK) &==& N_1
\\
R(\BLANK,\BLANK) &==& N_1
\nonumber \\
a[\BLANK/\BLANK] &==& a
\nonumber
\end{eqnarray}
\item
{dual of an \IS}
\begin{center}
\infer{
\Phi : A -> \Fam{(\Fam{B})}
}{
\dual{\Phi} : A -> \Fam{(\Fam{B})}
}{}
\end{center}
\begin{eqnarray}
\label{eq:10}
C(a) &==& (\forall\; c : C_\Phi(a))R_\Phi(a,c)
\\
R(a,\BLANK) &==& C_\Phi(a)
\nonumber \\
a[f/c] &==& a[c/f(c)]_\Phi
\nonumber
\end{eqnarray}
Note: this doesn't have very good properties constructively. One can
however calculate duals formally, by changing suprema to infima, angels
by demons, \etc{}.
\newpage
\item
{closures of pt's}:
\begin{center}
\infer{
F : \Pow{A}->\Pow{A}
}{
\opt{F},\tc{F},\rtc{F},\ctr{F} : \Pow{A} -> \Pow{A}
}{}
\end{center}
\begin{eqnarray}
\label{eq:9}
\rtc{F}
&==&
\begin{array}[t]{l}
\cap \SET{ T : \Pow{A} -> \Pow{A}}
{ %(\forall\; U : \Pow{U})
(\SKIP \cup (F \SEQ T)) \subseteq T }
\end{array}
\\
&=& U |-> \SET{a : A}
{(\forall\;V : \Pow{A})\; ((U \cup F(V)) \subseteq V) -> V(a)
}
\nonumber \\
&=& \cup_\alpha (F \SEQ)^\alpha(\SKIP)
\nonumber \\
\ctr{F} &==& \cup \SET{T : \Pow{A} -> \Pow{A}}{T \subseteq (\SKIP \cap (\dual{F} \SEQ T)) }
\\
&=& U |-> \SET{a : A}
{(\exists\;V : \Pow{A})\; V \subseteq (U \cap \dual{F}(V)) \wedge V(a)
}
\nonumber \\
&=& \cap_n \SET{F_n}{ F_0 = \SKIP ; F_{n+1} = F_n \cap (\dual{F} \SEQ F_n) }
\nonumber
\end{eqnarray}
\item
{closures of is's}
\infer{ \Phi : A -> \FamFam{A}}
{\rtc{\Phi}, \opt{\Phi}, \tc{\Phi}, \ctr{\Phi} : A -> \FamFam{A}}
{}
$\rtc{\Phi}$ %: \hspace{1cm}
\begin{eqnarray}
\label{eq:6}
C ==
\begin{array}[t]{l}
(\mu\;X : A -> \Set)\\
(\forall \; a : A) \\
\begin{array}{@{}r@{}l}
&\SING{\mbox{exit}} \\
\cup & \SET{\mbox{call}(c, f) }{ c : C_\Phi(a), f : (\forall\;r : R(a,c))\; X(a[c/r]_\Phi)} \\
& \subseteq X(a)
\end{array}
\end{array}
\end{eqnarray}
\begin{eqnarray}
R(a,\mbox{exit})
&==& \SING{\mbox{nil}}
\nonumber \\
R(a,\mbox{call}(c,f))
&==& \SET{ \mbox{cons}(r_0,r') }{ r_0 : R_\Phi(a,c), r' : R(a[c/r_0],f(r_0)) }
\nonumber \\
a[\mbox{exit}/\mbox{nil}]
&==& a
\nonumber \\
a[\mbox{call}(c,f)/\mbox{cons}( r_0,r' ) ]
&==& (a[c/r_0]_\Phi)[f(r_0)/r']
\nonumber
\end{eqnarray}
$\opt{\Phi} == \Phi \sqcup \SKIP$, \hspace{1cm}
$\tc{\Phi} == \Phi \SEQ \rtc{\Phi}$.
$\ctr{\Phi}$ %: \hspace{1cm}
\begin{eqnarray}
\label{eq:6}
C ==
\begin{array}[t]{l}
(\nu\;X : A -> \Set)\\
(\forall \; a : A) \\
\begin{array}{rl}
X(a) \subseteq
& \SET{\mbox{srv}(f,g) }{
\begin{array}[t]{l}
f : (\forall\;c : C_\Phi(a))R_\Phi(a,c), \\
g : (\forall\;c : C_\Phi(a)) X(a[c/f(c)]_\Phi)} \\
\end{array}
\end{array}
\end{array}
\end{eqnarray}
\begin{eqnarray}
R(a,\mbox{srv}(f,g))
&==&
\begin{array}[t]{@{}r@{}l}
&\SING{\mbox{nil}} \\
\cup & \SET{\mbox{cons}(c_0,c')}{c_0 : C_\Phi(a), c' : R(a[c_0/f(c_0)],g(c_0)) }
\end{array}
\nonumber \\
% R(a,\mbox{call}(c,f))
% &==& \SET{ \mbox{cons}(r_0,r') }{ r_0 : R_\Phi(a,c), r' : R(a[c/r_0],f(r_0)) }
% \nonumber \\
a[\mbox{srv}(f,g)/\mbox{nil}]
&==& a
\nonumber \\
a[\mbox{srv}(f,g)/\mbox{cons}(c_0,c' ) ]
&==& (a[c_0/f(c_0)]_\Phi)[g(c_0)/c']
\nonumber
\end{eqnarray}
\item
{functional assignment as a \PT}\\
\centerline{
\infer{f : A \to B }{ \assign{f} : \Pow{B} \to \Pow{A} }{}
}
\[
s |= (\assign{f})(P) == f(s) |= P
\]
Note: $\assign{f} = (\cdot f) = f^{-1} = \Pow{f}$.
Note: $\assign{f} \SEQ \assign{g} = \assign{g \cdot f}$.
Redundant.
\[
\assign{f} = \assert{\graph{f}} = \assume{\graph{f}}
\]
\item
{functional assignment as an \IS}\\
\centerline{
\infer{f : A \to B }{ \assign{f} : A \to \FamFam{B} }{}
}
\begin{eqnarray}
\label{eq:17}
C(\BLANK) &==& N_1 \nonumber \\
R(\BLANK,\BLANK) &==& N_1 \nonumber \\
a[\BLANK/\BLANK] &==& f(a) \nonumber
\end{eqnarray}
\end{enumerate}
\subsubsection[MorphPT]{Morphisms between predicate transformers}
\label{sec:morph-betw-pred-1}
In analogy with transition structures, we could define a morphism between
interaction structures to be a pair of predicate transformers that
satisfy the appropriate sub-commutativity property. However, in this case
we insist that the predicate transformer is \emph{angelic}, that is, commutes
with all disjunctions, that is, is determined by its
value at singletons, that is, is an angelic relational update.
Give definition.
Give it for interaction structures. Note: really between an is and a pt.
\newpage
\section{laws}
Laws for (inferring inclusion and equality between) relations.
\begin{enumerate}
\item Predicate transformers determine relation transformers.
However, the effect of certain predicate transformers can sometimes
be expressed merely from sequential composition, and division. The
following are equations between relation transformers.
\begin{eqnarray}
\label{eq:12}
\begin{array}[t]{lcl}
(Q \SEQ) &=& (\inv{}) \cdot (\assert{Q} \cdot) \cdot (\inv{})
\\
(\SEQ Q) &=& (\assert{\inv{Q}} \cdot)
\end{array}
&&
\begin{array}[t]{lcl}
(/Q) &=& (\assume{Q} \cdot) \\
(\setminus Q) &=& (\inv{}) \cdot (\assume{\inv{Q}} \cdot) \cdot (\inv{})
%\nonumber
\end{array}
\end{eqnarray}
To a certain extent, we are interested in representing relations
with transition structures -- we may represent a relation as a
transition structure, or as the converse of a transition structure,
or as the reciprocal of a transition structure (and so on and on).
\item I might have introduced binary operators for relative complement and
implication. Then the adjunctions for relations are:
\begin{eqnarray}
\label{eq:15}
(R_1 \SEQ R_2) \subseteq Q &<=>& R_1 \subseteq (Q / R_2) \\
(R_1 - R_2) \subseteq Q &<=>& R_1 \subseteq (R_2 \cup Q) \\
(R_1 \cap R_2) \subseteq Q &<=>& R_1 \subseteq (R_2 => Q)
\end{eqnarray}
Some laws (need to check):
\begin{eqnarray}
Q - (R_1 \cup R_2) &=& (Q - R_1) \cap (Q - R_2) = ((Q - R_1)-R_2) \\
Q - (R_1 \cap R_2) &\supseteq& (Q - R_1) \cup (Q - R_2) \\
(R_1 \cup R_2) => Q &=& (R_1 => Q) \cap (R_2 => Q) \\
(R_1 \cap R_2) => Q &=& R_1 => (R_2 => Q)
\end{eqnarray}
Non-binary sups and infs?
\item laws for sups and infs.
\begin{eqnarray}
\label{eq:13}
% && (R_1 \SEQ R_2) \subseteq Q <=> R_1 \subseteq (Q / R_2 ) \\
&& (\cup R_i) \subseteq Q <=> (\forall\;i)\; R_i \subseteq Q \\
&& Q \subseteq (\cap R_i) <=> (\forall\;i)\; Q \subseteq R_i
\end{eqnarray}
For relations, sequential composition commutes with union (on both sides).
\item The laws for relational converse $\inv{}$. This commutes with infima and suprema (and
closures), is monotone, and
\begin{eqnarray}
\label{eq:13}
&&\inv{\inv{Q}} = Q \\
&&\inv{(\graph{f})} \SEQ (\graph{f}) \subseteq \SKIP
; \hspace{1em}
\SKIP \subseteq (\graph{f}) \SEQ \inv{(\graph{f})} \\
&& \inv{\SKIP} = \SKIP \\
&& \inv{(Q \SEQ R)} = \inv{R} \SEQ \inv{Q}
% && Q \subseteq id <=> \inv{Q} \subseteq id
\end{eqnarray}
\item
What are right laws for domain restriction?
\begin{eqnarray}
\label{eq:13}
% && (R_1 \SEQ R_2) \subseteq Q <=> R_1 \subseteq (Q / R_2 ) \\
&& \plift{U} = \guard{U}{\SKIP} \\
&& \guard{U}{R} = \plift{U} \SEQ R \\
&& \plift{U} \SEQ \plift{V} = \plift{(U \cap V)} \\
&& R_1 \subseteq R_2 => (\guard{U}{R_1}) \subseteq (\guard{U}{R_2}) \\
&& U \subseteq V => (\guard{U}{R}) \subseteq (\guard{V}{R})
\end{eqnarray}
\item Dedekind's law (modular law). This is what governs sequential composition, converse
and intersection.
\begin{eqnarray}
\label{eq:14}
(Q \SEQ R) \cap S \subseteq Q \SEQ (R \cap (\reverse{Q} \SEQ S))
\end{eqnarray}
This law is used to prove that if $Q$ is a partial function
(`deterministic' relations) then $Q \SEQ (R_1 \cap R_2) = (Q \SEQ R_1) \cap (Q \SEQ R_2)$.
In other words $(Q \SEQ)$ is conjunctive (it is always disjunctive).
\item
Intersection of relations.
\begin{eqnarray}
\label{eq:16}
\test{U} \cap \test{V} &=& \test{U \cap V} \\
(Q_1 \cap Q_2) \cap Q_3 &=& Q_1 \cap (Q_2 \cap Q_3) \\
Q \cap \void &=& \void \\
Q \cap \chaos &=& Q \\
Q_1 \cap Q_2 &=& Q_2 \cap Q_1 \\
Q \cap Q &=& Q \\
Q \cap (Q \cup R) &=& Q \\
Q \cap (\cup_i R_i) &=& \cup_i (Q \cap R_i) \\
Q \cap (\cap_i R_i) &=& \cap_i (Q \cap R_i) \\
Q \cap (R_1 \SEQ R_2) &\subseteq& R_1 \SEQ (R_2 \cap (\inv{R_1} \SEQ Q)) \\
Q \cap \inv{R} &=& \inv{(\inv{Q} \cap R)}
\end{eqnarray}
\item
Union of relations.
\begin{eqnarray}
\label{eq:16}
\test{U} \cup \test{V} &=& \test{U \cup V} \\
(Q_1 \cup Q_2) \cup Q_3 &=& Q_1 \cup (Q_2 \cup Q_3) \\
Q \cup \void &=& Q \\
Q \cup \chaos &=& \chaos \\
Q_1 \cup Q_2 &=& Q_2 \cup Q_1 \\
Q \cup Q &=& Q \\
Q \cup (Q \cap R) &=& Q \\
Q \cup (\cup_i R_i) &=& \cup_i (Q \cup R_i) \\
Q \cup (\cap_i R_i) &\subseteq& \cap_i (Q \cup R_i) \\
Q \cup (R_1 \cap R_2) &=& (Q \cup R_1) \cap (Q \cup R_2) \\
Q \cup (R_1 \SEQ R_2) &\ldots&
\end{eqnarray}
\item
Sequential composition of relations.
\begin{eqnarray}
\label{eq:18}
% \test{U} \SEQ \test{V} &=& \test{(U \cap V)} \\
(Q_1 \SEQ Q_2) \SEQ Q_3 &=& Q_1 \SEQ (Q_2 \SEQ Q_3) \\
\SKIP \SEQ Q &=& Q \\
Q \SEQ \SKIP &=& Q \\
Q \SEQ (\cap_i R_i) &\subseteq& \cap_i (Q \SEQ R_i) \\
(\cap_i Q_i) \SEQ R &\subseteq& \cap_i (Q_i \SEQ R) \\
% Q \SEQ (\cup_i R_i) &\supseteq& \cup_i (Q \SEQ R_i) \\
% (\cup_i Q_i) \SEQ R &\supseteq& \cup_i (Q_i \SEQ R)
Q \SEQ (\cup_i R_i) &=& \cup_i (Q \SEQ R_i) \\
(\cup_i Q_i) \SEQ R &=& \cup_i (Q_i \SEQ R) \\
Q \SEQ \void &=& \void \\
\void \SEQ Q &=& \void \\
\test{U} \SEQ \test{V} &=& \test{(U \cap V)} \\
\SKIP &=& \test{\chaos} \\
\graph{f} \SEQ \graph{g} &=& \graph{(g \cdot f)} \\
\SKIP &=& \graph{\SKIP}
\end{eqnarray}
\end{enumerate}
Laws for (inferring inclusion and equality between) predicate transformers.
\begin{enumerate}
\item
For predicate transformers, sequential composition commutes with sups and infs
in its left-hand argument. (Unlike the case of relations, where we don't commute with
infs, but commute with sups on both sides.)
\begin{eqnarray}
\label{eq:20}
(F_1 \SEQ F_2) \SEQ F_3 &=& F_1 \SEQ (F_2 \SEQ F_3) \\
\SKIP \SEQ F &=& F \nonumber \\
F \SEQ \SKIP &=& F \nonumber \\
(\cup_i F_i) \SEQ G &=& \cup_i (F_i \SEQ G)
\nonumber \\
(\cap_i F_i) \SEQ G &=& \cap_i (F_i \SEQ G)
\nonumber \\
F \SEQ (\cup_i G_i) &\supseteq& \cup_i (F \SEQ G_i)
\nonumber \\
F \SEQ (\cap_i G_i) &\subseteq& \cap_i (F \SEQ G_i)
\nonumber
\end{eqnarray}
The last 2 semi-equations are just by monotonicity.
They can be strengthened to equality for certain $F$.
\[
\begin{array}[t]{lcl}
\angelU{\phi} \SEQ (\cup_i G_i) &=& \cup_i (\angelU{\phi} \SEQ G_i) \\
\demonU{\phi} \SEQ (\cap_i G_i) &=& \cap_i (\demonU{\phi} \SEQ G_i)
\end{array}
\]
\item
Special cases of sequential composition. Should be associative with unit $\SKIP$.
\begin{eqnarray}
\label{eq:19}
\angelU{Q} \SEQ \angelU{R} &=& \angelU{Q \SEQ R}
% \nonumber
\\
% \assert{\SKIP} &=& \SKIP
% \nonumber \\
\demonU{Q} \SEQ \demonU{R} &=& \demonU{Q \SEQ R}
\nonumber \\
% \assume{\SKIP} &=& \SKIP
% \nonumber \\
\SKIP &=& \assign{\SKIP}
\nonumber \\
% \assert{\test{U \cap V}} &=& \assert{\test{U}} \SEQ \assert{\test{V}}
% \nonumber \\
% \assert{\test{\chaos}} &=& \SKIP
% \nonumber \\
% \assume{\test{U \cap V}} &=& \assume{\test{U}} \SEQ \assume{\test{V}}
% \nonumber \\
% \assume{\test{\chaos}} &=& \SKIP
% \nonumber \\
% \assign{g \cdot f} &=& \assign{f} \SEQ \assign{g}
% \nonumber \\
\assign{f}
&=&
\assert{\graph{f}}
= \assume{\graph{f}}
\nonumber
\end{eqnarray}
\item Are these true?
\begin{eqnarray}
\label{eq:22}
F ; \assert{Q} &=& \assert{F \cdot Q} \\
F ; \assume{Q} &=& \assume{F \cdot Q}
\end{eqnarray}
An interaction structure is something of the form $\assert{\phi} \SEQ \assume{\psi}$.
\end{enumerate}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
*