%%
\documentclass{llncs}
% \documentclass[a4paper,11pt]{article}
%\documentclass[twocolumn,a4paper,11pt]{article}
\usepackage{latexsym}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{exptrees}
\usepackage{url}
\usepackage{bussproofs}
\renewcommand{\today}{\number\day\
\ifcase\month\or
January\or February\or March\or April\or May\or June\or
July\or August\or September\or October\or November\or December\fi
\ \number\year}
\setcounter{secnumdepth}{2}
\setcounter{tocdepth}{2}
% \newtheorem{theorem}{Theorem}[section]
\renewcommand{\qed}{\hfill$\dashv$}
% \newenvironment{proof}{
% \noindent{\bf Proof:}}{\mbox{\bf Q.E.D.}\vspace{1ex}}
% \newtheorem{definition}{Definition}[section]
% \newtheorem{lemma}{Lemma}[section]
% \newtheorem{corollary}{Corollary}[section]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Preamble
\newcommand\cfile{}
\newcommand\comment[1]{}
\newcommand\cc[1]{#1} % for label names
\renewcommand\theenumi{(\alph{enumi})}
\renewcommand\labelenumi{\theenumi}
\renewcommand\theenumii{(\roman{enumii})}
\renewcommand\labelenumii{\theenumii}
\pagestyle{plain}
% \documentclass[envcountsect,envcountsame]{llncs}
% \usepackage{latexsym,amsmath,amssymb,url,gastex,epigraph}
\input{prooftree}
%% Theorem environments
\spnewtheorem{defn}[theorem]{Definition}{\bfseries}{\rmfamily}
\spnewtheorem{conj}[theorem]{Conjecture}{\bfseries}{\itshape}
% \spnewtheorem{comment}[theorem]{Comment}{\bfseries}{\itshape}
%% General mathematical notations
\newcommand{\pow}[1]{\mbox{Pow}(#1)}
\newcommand{\defeq}{=_{\mbox{\scriptsize{def}}}}
\newcommand{\restrict}{\upharpoonright}
\newcommand{\nat}{\mathbb{N}}
\newcommand{\natzero}{\mathbb{N}_0}
\newcommand{\integers}{\mathbb{Z}}
\newcommand{\range}[2]{\{{#1},\ldots,{#2}\}}
\newcommand{\setcomp}{\;|\;}
\newcommand{\pair}[2]{\langle{#1},{#2}\rangle}
\newcommand{\partialfn}{\rightharpoonup}
\newcommand{\mathmin}[1]{\mbox{min}(#1)}
\newcommand{\mathmax}[1]{\mbox{max}(#1)}
%% Language
\newcommand{\fv}[1]{{\mathcal{V}(#1)}}
\newcommand{\vars}{\mathcal{V}}
\newcommand{\monoid}{\langle R,\circ,e \rangle}
\newcommand{\abgroup}{\langle R,\circ,e,-\rangle}
%% Proof systems
\newcommand{\LJ}{\mathrm{LJ}}
\newcommand{\LK}{\mathrm{LK}}
\newcommand{\hilbert}[1]{\mathrm{HL}_{#1}}
\newcommand{\LBI}{\mathrm{LBI}}
\newcommand{\dl}[1]{\mathrm{DL}_{#1}}
\newcommand{\dcalc}{\mathcal{D}}
%% Display logic structures
\newcommand{\emp}{\mathbf{0}}
\newcommand{\aemp}{\emptyset}
\newcommand{\memp}{\varnothing}
\newcommand{\inv}{\natural}
\newcommand{\ainv}{\sharp}
\newcommand{\minv}{\flat}
\newcommand{\app}{\bullet}
\newcommand{\aapp}{\mathrel{;}}
\newcommand{\mapp}{\mathrel{,}}
\newcommand{\imp}{\mathbin{- \! {\bullet}}}
\newcommand{\aimp}{\Rightarrow}
\newcommand{\mimp}{\multimap}
\newcommand{\dpost}{\rightleftarrows_D}
\newcommand{\displayeq}{\equiv_{D}}
\newcommand{\adrewr}{\rightarrow_{AD}}
\newcommand{\assocrewr}{\rightarrow_A}
\newcommand{\adeq}{\equiv_{AD}}
\newcommand{\plaindel}[2]{\mbox{$#1 \setminus #2$}}
\newcommand{\del}[2]{\mbox{$(#1) \setminus #2$}}
\newcommand{\builtfrom}[2]{#1 \lhd #2}
\newcommand{\notbuiltfrom}[2]{#1 \ntriangleleft #2}
\newcommand{\con}{{\cal C}}
%% Logical symbols
\newcommand{\true}{\top}
\newcommand{\false}{\bot}
\renewcommand{\implies}{\rightarrow}
\renewcommand{\conj}{\mathrel{\&}}
\newcommand{\atrue}{\true_{a}}
\newcommand{\afalse}{\false_{a}}
\newcommand{\aconj}{\conj_{a}}
\newcommand{\adisj}{\vee_{a}}
\newcommand{\mtrue}{\true^*}
\newcommand{\mfalse}{\false^{\!*}}
\newcommand{\mneg}{\mathord{\sim}}
\newcommand{\mor}{\mathrel{\overset{\vcenter{\offinterlineskip
\hbox{\scriptsize{$\ast$}}\vskip-1.5ex}}{\vee}}}
\newcommand{\wand}{\mathrel{\hbox{---}\llap{$\ast$}}}
\newcommand{\bigand}{\textstyle\bigwedge}
\newcommand{\bigor}{\textstyle\bigvee}
\newcommand{\dlantform}[1]{\Psi_{#1}}
\newcommand{\dlconform}[1]{\Upsilon_{#1}}
\newcommand{\myC}{\textbf{C}}
\newcommand{\myS}{\textbf{S}}
%% Provability and satisfiability relations
\newcommand{\seq}[2]{{#1 \vdash #2}}
\newcommand{\entails}[3]{#2 \vdash_{#1} #3}
\newcommand{\notseq}[2]{#1 \not\vdash #2}
\newcommand{\drvequiv}[2]{#1 \dashv\vdash #2}
\newcommand{\sat}[2]{#1 \models_\rho #2}
\newcommand{\notsat}[2]{#1 \not\models_\rho #2}
\newcommand{\drvof}[1]{\begin{array}{c}\vdots\\{#1}\end{array}}
\newcommand{\drvfrom}[2]{\[{#1} \leadsto {#2}\]}
\newcommand{\LADI}{\mathrm{LADI}}
%% Logical proof rule labels
\newcommand{\ax}{\mbox{(Id)}}
\newcommand{\bunchequiv}{\mbox{(Equiv)}}
\newcommand{\acut}{\mbox{(ACut)}}
\newcommand{\mcut}{\mbox{(MCut)}}
\newcommand{\subst}{\mbox{(Subst)}}
\newcommand{\truel}{\mbox{($\true$L)}}
\newcommand{\truer}{\mbox{($\true$R)}}
\newcommand{\falsel}{\mbox{($\false$L)}}
\newcommand{\falser}{\mbox{($\false$R)}}
\newcommand{\mtruel}{\mbox{($\mtrue$L)}}
\newcommand{\mtruer}{\mbox{($\mtrue$R)}}
\newcommand{\mfalsel}{\mbox{($\mfalse$L)}}
\newcommand{\mfalser}{\mbox{($\mfalse$R)}}
\newcommand{\negr}{\mbox{($\neg$R)}}
\newcommand{\negl}{\mbox{($\neg$L)}}
\newcommand{\andr}{\mbox{($\&$R)}}
\newcommand{\andl}{\mbox{($\&$L)}}
\newcommand{\orr}{\mbox{($\vee$R)}}
\newcommand{\intorr}[1]{\mbox{($\vee \mathrm{R}_{#1}$)}}
\newcommand{\orl}{\mbox{($\vee$L)}}
\newcommand{\impr}{\mbox{($\rightarrow$R)}}
\newcommand{\impl}{\mbox{($\rightarrow$L)}}
\newcommand{\starl}{\mbox{($*$L)}}
\newcommand{\starr}{\mbox{($*$R)}}
\newcommand{\wandl}{\mbox{($\wand$L)}}
\newcommand{\wandr}{\mbox{($\wand$R)}}
\newcommand{\morl}{\mbox{($\mor$L)}}
\newcommand{\morr}{\mbox{($\mor$R)}}
\newcommand{\mnegl}{\mbox{($\mneg$L)}}
\newcommand{\mnegr}{\mbox{($\mneg$R)}}
\newcommand{\atruer}{\mbox{($\atrue$R)}}
\newcommand{\afalsel}{\mbox{($\afalse$L)}}
\newcommand{\aconjl}{\mbox{($\aconj$L)}}
\newcommand{\aconjr}{\mbox{($\aconj$R)}}
\newcommand{\adisjl}{\mbox{($\adisj$L)}}
\newcommand{\adisjr}{\mbox{($\adisj$R)}}
\newcommand{\rewr}{\mbox{($\equiv$)}}
\newcommand{\rewrl}{\mbox{($\dashv\vdash$L)}}
\newcommand{\rewrr}{\mbox{($\dashv\vdash$R)}}
\newcommand{\eq}{\mbox{($=$)}}
\newcommand{\MP}{\mbox{(MP)}}
% Display rule labels
\newcommand{\adl}[2]{\mbox{(AD{#1}{#2})}}
\newcommand{\mdl}[2]{\mbox{(MD{#1}{#2})}}
\newcommand{\display}{\mbox{($\displayeq$)}}
\newcommand{\adrule}{\mbox{($\adeq$)}}
% Structural rule labels
\newcommand{\weak}{\mbox{$\mathrm{(W)}$}}
\newcommand{\contr}{\mbox{$\mathrm{(C)}$}}
\newcommand{\assoc}{\mbox{$\mathrm{(\alpha)}$}}
\newcommand{\aunitcl}{\mbox{$\mathrm{(\aemp C_L)}$}}
\newcommand{\aunitcr}{\mbox{$\mathrm{(\aemp C_R)}$}}
\newcommand{\aunitwl}{\mbox{$\mathrm{(\aemp W_L)}$}}
\newcommand{\aunitwr}{\mbox{$\mathrm{(\aemp W_R)}$}}
% Derived proof rule labels
\newcommand{\dlantl}{\mbox{($\dlantform{}$L)}}
\newcommand{\dlconr}{\mbox{($\dlconform{}$R)}}
% to avoid latex separating letters in math mode
\newcommand{\As}{\mathit{As}}
\newcommand{\Fs}{\mathit{Fs}}
\newcommand{\Us}{\mathit{Us}}
\newcommand{\Vs}{\mathit{Vs}}
\newcommand{\Ys}{\mathit{Ys}}
\begin{document}
% \title{Mechanisation of Proofs of Interpolation for Display Logic} % Jeremy
% \title{Mechanising Craig Interpolation for Substructural Logics} % James
\title{Machine-checked Interpolation Theorems for Substructural Logics using
Display Calculi} % Raj
\author{
Jeremy E.\ Dawson\inst{1}%
\thanks{Supported by Australian Research Council Discovery Grant DP120101244}
\and
James Brotherston\inst{2}%
\thanks{Supported by an EPSRC Career Acceleration Fellowship}
\and
Rajeev Gor\'e\inst{1}
% \thanks{Contact Author }
% \thanks{Supported by an Australian Research Council QEII Fellowship}
}
\institute{
%Logic and Computation Group,
Research School of Computer Science,
Australian National University
%Canberra ACT 0200, Australia
\and
University College London, UK
%\\
% \url{http://users.cecs.anu.edu.au/~jeremy/}
% \ \ \url{http://users.cecs.anu.edu.au/~rpg/}
% \ \ \url{http://www.eecs.qmul.ac.uk/~jamesb/}
}
\date{}
%% Title
\maketitle
\begin{abstract}
We present a mechanised formalisation, in Isabelle/HOL,
of Brotherston and Gor\'e's proof of Craig interpolation for
a large class of display calculi for various propositional substructural logics.
Along the way, we discuss the particular difficulties
associated with the local interpolation property for various rules,
and some important differences between our proofs and those of Brotherston
and Gor\'e, which are motivated by the ease of mechanising the development.
%formalising
%the definitions used in intermediate lemmas.
%and we detail how we proved the more difficult cases,
%such as the weakening rules and the multiplicative binary logical
%introduction rules., such as the
%binary logical introduction rules in their additive and multiplicative forms,
%and the weakening and unit weakening rules. We also
Finally, we discuss the value for this work of
using a prover with a programmable user interface
(here, Isabelle with its Standard ML interface).
%
%\\[1ex]
%Keywords:
%Craig interpolation,
%display logic,
%interactive theorem proving,
%proof theory
\end{abstract}
\begin{keywords}
Craig interpolation,
display logic,
interactive theorem proving
%proof theory
\end{keywords}
\newcommand\derl{\texttt{derl}}
\newcommand\derrec{\texttt{derrec}}
\newcommand\rls{\texttt{rls}}
\newcommand\adm{\texttt{adm}}
\newcommand\invrule[2]{
% for use in non-math mode, args interpreted in math mode
\begin{tabular}{@{}c@{}}
$ #1 $ \\ \hline \hline $ #2 $
\end{tabular}}
\newcommand\slrule[2]{ % similar to \frac, except
% for use in non-math mode, args interpreted in math mode
\begin{tabular}{@{}c@{}}
$ #1 $ \\ \hline $ #2 $
\end{tabular}}
\newcommand\src[1]{source file \texttt{#1}}
\newcommand\apsrc[1]{Appendix \ref{app:code}}
\newcommand\seesrc[1]{(see source file \texttt{#1})}
\newcommand\insrc[1]{in source file \texttt{#1}}
\newcommand\page[1]{page~\pageref{#1}}
\newcommand\fig[1]{Fig.~\ref{fig:#1}}
\newcommand\ch[1]{Chapter~\ref{ch:#1}}
\newcommand\tbl[1]{Table~\ref{tbl:#1}}
\newcommand\thrm[1]{Theorem~\ref{thrm:#1}}
\newcommand\lem[1]{Lemma~\ref{lem:#1}}
\newcommand\ttl[1]{\tti{#1}\label{#1}}
\newcommand\ttdl[1]{\texttt{#1}\tti{#1}\label{#1}}
\newcommand\ttdef[1]{\texttt{#1}\tti{#1}}
\newcommand\tti[1]{\index{#1@\texttt{#1}}}
\newcommand\bfi[1]{\textbf{#1}\index{#1|textbf}}
\newcommand\up[1]{\vspace{-#1ex}}
\newcommand\vpf{\vspace{-3ex}} % to reduce space before proof tree
\newcommand\1{\textbf 1}
\newcommand\0{\textbf 0}
%\newcommand\cut{(\emph{cut})}
\newcommand{\cut}{\mbox{(Cut)}}
\hyphenation{
assump-tion
in-volves
non-classical
conn-ection
typi-cally
tradi-tional
frame-works
con-clusion
deriv-ation
}
%% Here begins the main text
\section{Introduction} \label{s-intro}
% this section should be an intro to the topics generally
In calculi for logical entailment, \emph{Craig interpolation} is the property
that for any entailment $A \vdash B$ between formulae, there exists an
\emph{interpolant} formula $I$ such that $A \vdash I$ and $I \vdash B$ are
both entailments of the calculus, while $I$ mentions only those variables or
nonlogical constants that are common to both $A$ and $B$. It has long been
known that there are close connections between interpolation and other
central logical concerns (see e.g.~\cite{Feferman:08}); indeed, one of
Craig's original applications of interpolation was to give a new proof of
Beth's Definability Theorem~\cite{Craig:57-2}. More recently, though, it has
transpired that interpolation has significant applications in program
verification as well; e.g., in the inference of \emph{loop invariants}~\cite{McMillan:08}, and
in model checking~\cite{Caniart:10}.
Recently, Brotherston and Gor\'e~\cite{brotherston-gore-interpolation}
gave a modular
proof of interpolation for a class of propositional substructural logics,
based on Belnap's \emph{display logic}~\cite{belnap-display} (here we
prefer the term \emph{display calculi}). Roughly
speaking, display calculi are two-sided sequent calculi equipped with a
richer-than-usual notion of sequent structure and a principle by which
sequents can be rearranged so as to ``display'' any chosen substructure as
the \emph{entire} left or right hand side (much like rearranging a
mathematical equation for a chosen variable). The main attraction of
display calculi is Belnap's general cut-elimination result,
which says that cut-elimination holds for any display calculus whose rules
satisfy eight easily verifiable syntactic conditions. Cut-elimination is
generally essential to the standard proof-theoretic approach to
interpolation, which is to proceed by induction on cut-free derivations (see e.g.~\cite{Buss:98-I}).
Despite the availability of a general cut-elimination result, however, there
seem to have been no proofs of interpolation based on display calculi prior
to~\cite{brotherston-gore-interpolation},
probably due to the inherent complexity of
their sequent structure and display principles. Indeed, in line with this
general expectation, Brotherston and Gor\'e's proof is very technical,
involving many case distinctions, and many intricate properties of
substitutions.
Moreover, due to space restrictions, most of the proofs are only sketched,
leaving the potential for errors.
Thus we believe it is vital to verify these intricate details
using an interactive theorem prover,
to give us greater confidence in their very general interpolation theorems.
In this paper, we describe the Isabelle/HOL formalisation of their results,
discuss the difficulties in formalising their proofs and describe the
differences between their proofs and ours. We also
highlight the usefulness of a programmable user interface. Our Isabelle mechanisation, comprising over 8000 lines of Isabelle theory and proof code, can be found at~\cite{mechanisation}.
%\url{http://users.cecs.anu.edu.au/~jeremy/isabelle/2005/interp/},
%over 8000 lines of Isabelle theory and proof code.
%
% Isabelle is an automated proof assistant. % \cite{isabelle-ref}.
% Its meta-logic is an intuitionistic typed higher-order logic, sufficient
% to support the built-in inference steps of higher-order unification
% and term rewriting. Isabelle accepts inference rules of the form
% $\alpha_1, \alpha_2, \ldots, \alpha_n, \Longrightarrow \beta$
% where the $\alpha_i$ and $\beta$ are expressions of the Isabelle
% meta-logic, or are expressions using a new syntax, defined by the
% user, for some ``object logic''. Most users build on one of the
% comprehensive ``object logics'' already supplied,
% like Isabelle/HOL % \cite{IsLogHOL},
% which is an Isabelle theory based on the higher-order
% logic of Church % \cite{church-types}
% HOL offers inductively defined sets, and recursive datatypes, which we use
% extensively.
%
% We assume the reader is familiar with basic
% proof-theory, ML and Isabelle/HOL.
% %
% In the paper we show some Isabelle code, edited
% to use mathematical symbols.
% In it, a `\verb|?|' precedes a variable name.
% The Appendix gives the actual Isabelle text
% of many definitions and theorems.
%TO DO - include explanation of Isabelle notation
% \begin{document}
\section{Display calculi for (some) substructural logics}%\vspace{-0.2cm}
\label{sec:display_calc}
Here, we briefly describe display calculi, and recall those display calculi for which Brotherston and
Gor\'e proved interpolation~\cite{brotherston-gore-interpolation}.
%and our approach to proving interpolation for these calculi.
%Brothterston and Gor\'e~\cite{brotherston-gore-interpolation}
% established Craig interpolation for the calculi described next.
We assume a fixed infinite set of propositional variables. \emph{Formulae} $F$ and \emph{structures} $X$ are then given by the following grammars, where $P$ ranges over propositional variables:
\[\begin{array}{l}
F ::= P \mid \true \mid \false \mid \neg F \mid F \conj F \mid F \vee F \mid F \implies F \mid \atrue \mid \afalse \mid F \aconj F \mid F \adisj F
\\[0.3em]
X ::= F \mid \aemp \mid \ainv X \mid X \aapp X
\end{array}\]
Formula connectives with an ``a'' subscript stand for an \emph{additive} version of that connective, while connectives without a subscript are construed as \emph{multiplicative}.
However, in the Isabelle formulation we do not duplicate the connectives in
this way ---
rather, we identify the logical rules for which our various results apply.
We write $F,G$ etc.\ to range over formulae and $W,X,Y,Z$ etc.\ to range over structures.
If $X$ and $Y$ are structures then $\seq{X}{Y}$ is a \emph{consecution}.
The complete set of proof rules for our display calculi is given in
Figure~\ref{fig:rules}.
%\footnote{how to stop this going to the end of the document?}.
As usual, we begin by giving a set of \emph{display postulates}, and taking the least equivalence closed under the postulates to be our notion of \emph{display-equivalence}. We then have the usual \emph{display theorem}, which says that for any structure occurrence $Z$ in a consecution $\seq{X}{Y}$, one has either $\seq{X}{Y} \displayeq \seq{Z}{W}$ or $\seq{X}{Y} \displayeq \seq{W}{Z}$ for some $W$, depending on whether $Z$ occurs positively or negatively in $\seq{X}{Y}$. Rearranging $\seq{X}{Y}$ into $\seq{Z}{W}$ or $\seq{W}{Z}$ in this way is called \emph{displaying} $Z$. We remark that the display postulates ``build in'' commutativity of the structural semi-colon, so that we consider only calculi for commutative logics.
Brotherston and Gor\'e~\cite{brotherston-gore-interpolation} consider
the additive rules, collectively, and each structural rule,
individually, to be \emph{optional} inclusions in their calculi. At
present, our mechanisation assumes the presence of the unit rules
$\aunitwl$, $\aunitwr$, $\aunitcl$, $\aunitcr$ and the associativity
rule $\assoc$. Thus the smallest display calculus we consider gives
multiplicative linear logic $\mathrm{MLL}$. By adding the additive
logical rules we obtain multiplicative-additive linear logic
$\mathrm{MALL}$, and by adding the full weakening rule $\weak$ or the
full contraction rule $\contr$ we obtain affine or strict variants of
these logics, respectively. Note that rules for weakening and
contraction on the right can be derived using the display postulates
from the corresponding left rules. Of course, if we add \emph{both}
weakening and contraction then we obtain standard classical
propositional logic.
No matter which variant of these display calculi we consider, we have the standard \emph{cut-elimination} result due to Belnap. Since we omit the cut rule from our presentation of the display calculi in Figure~\ref{fig:rules}, we state it here in the weaker form of \emph{cut admissibility}:
\begin{theorem}[cf.~\cite{brotherston-gore-interpolation}]
\label{thm:cut_elim} If $\seq{X}{F}$ and $\seq{F}{Y}$ are both provable then so is $\seq{X}{Y}$. Moreover, this property is not affected by the presence or otherwise of the additive logical rules (collectively), or of any of the structural rules.
\end{theorem}
%The following definition shows how to interpret the structural connectives.
%\begin{defn}[Interpretation of structures]
%\label{defn:consecution_validity} For any structure $X$ we define the formulas $\dlantform{X}$ and
%$\dlconform{X}$ by mutual structural induction on $X$ as:
%\[\begin{array}{rcl@{\hspace{0.5cm}}rcl@{\hspace{0.9cm}}rcl@{\hspace{0.5cm}}rcl}
%\dlantform{F} & = & F &
%\dlantform{\aemp} & = & \true & \dlconform{F} & = & F & \dlconform{\aemp} & = & \false \\
%\dlantform{\ainv X} & = & \neg\dlconform{X} &
%\dlantform{X_1 \aapp X_2} & = & \dlantform{X_1} \conj \dlantform{X_2} & \dlconform{\ainv X} & = & \neg\dlantform{X} & \dlconform{X_1 \aapp X_2} & = & \dlconform{X_1} \vee \dlconform{X_2}\\
%\end{array}\]
%For any consecution $\seq{X}{Y}$ we define its \emph{formula interpretation} to be
%$\seq{\dlantform{X}}{\dlconform{Y}}$.
%\end{defn}
%
%\begin{defn}[Antecedent and consequent parts]
%\label{defn:parts} A \emph{part} of a structure $X$ is an occurrence of one of its substructures.
%We classify the parts of $X$ as either \emph{positive} or \emph{negative} in
%$X$ as follows:
%\begin{itemize}
%\item $X$ is a positive part of itself;
%\item a negative / positive part of $X$ is
% a positive / negative part of $\ainv X$;
%\item a positive / negative part of $X_1$ or %of
% $X_2$ is a positive / negative part of $X_1 \aapp X_2$.
%\end{itemize}
%$Z$ is said to be an \emph{antecedent / consequent part} of a
%consecution $\seq{X}{Y}$ if it is a positive / negative part of $X$ or a
%negative / positive part of $Y$.
%\end{defn}
%\begin{defn}[Display-equivalence]
%\label{defn:displayeq} We define \emph{display-equivalence}
%$\displayeq$ to be the least equivalence on consecutions containing the (symmetric) relation $\dpost$ given by
%the following \emph{display postulates}:
%\[\begin{array}{c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c}
%\seq{X ; Y}{Z} & \dpost & \seq{X}{\ainv Y ; Z} & \dpost & \seq{Y ; X}{Z} \\
%\seq{X}{Y ; Z} & \dpost & \seq{X ; \ainv Y}{Z} & \dpost & \seq{X}{Z ; Y} \\
%\seq{X}{Y} & \dpost & \seq{\ainv Y}{\ainv X} & \dpost & \seq{\ainv\ainv X}{Y}
%\end{array}\]
%\end{defn}
%
%Note that Defn.~\ref{defn:displayeq} builds in the
%commutativity of $\aapp$ on the left and right of consecutions,
%i.e., we are assuming both $\&$ and $\vee$ commutative. %This
%%makes life slightly easier, but is not crucial to our
%%developments.
%\begin{proposition}[Display property]
%\label{prop:display} For any antecedent / consequent part $Z$
%of a consecution $\seq{X}{Y}$, one can construct a structure
%$W$ such that $\seq{X}{Y} \displayeq \seq{Z}{W}$ / $\seq{X}{Y}
%\displayeq \seq{W}{Z}$, respectively.
%\end{proposition}
%
%\begin{proof}(Sketch) For any $\seq{X}{Y}$, the display
% postulates of Defn.~\ref{defn:displayeq} allow us to display each of
% the immediate substructures of $X$ and $Y$ (as the
% antecedent or consequent as appropriate). The proposition follows by
% iterating. \qed\end{proof}
%\begin{comment}
%\label{com:logics}
%Under the formula interpretation of consecutions given by
%Definition~\ref{defn:consecution_validity}, certain of our display calculi
%can be understood as follows:
%\begin{description}
%\item[$\dcalc_{\mathrm{MLL}} \!=\!$]
%$\dcalc_0 + \assoc, \aunitcl, \aunitcr, \aunitwl, \aunitwr$ is multiplicative linear logic (LL);
%\item[$\dcalc_{\mathrm{MALL}} =$]
% $\dcalc_0^+ + \assoc, \aunitcl, \aunitcr, \aunitwl, \aunitwr$ is multiplicative additive LL;
%\item[$\dcalc_{\mathrm{CL}} =$]
% $\dcalc_0 + \assoc, \aunitcl, \aunitcr, \weak, \contr$ is
% standard classical propositional logic.
%\end{description}
%\end{comment}
\begin{figure}[h!]
{\small
\mbox{\bf Display postulates:}
\[\begin{array}{c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c@{\hspace{0.3cm}}c}
\seq{X ; Y}{Z} & \dpost & \seq{X}{\ainv Y ; Z} & \dpost & \seq{Y ; X}{Z} \\&\\
\seq{X}{Y ; Z} & \dpost & \seq{X ; \ainv Y}{Z} & \dpost & \seq{X}{Z ; Y} \\&\\
\seq{X}{Y} & \dpost & \seq{\ainv Y}{\ainv X} & \dpost & \seq{\ainv\ainv X}{Y}
\end{array}\]\vspace{0.1cm}
\mbox{\bf Identity rules:}
\[\begin{array}{c@{\hspace{1.5cm}}c}%@{\hspace{0.8cm}}c}
\begin{prooftree}
\phantom{X'}
\justifies
\seq{P}{P}
\using \ax
\end{prooftree}
&
\begin{prooftree}
\seq{X'}{Y'}
\justifies
\seq{X}{Y}
\using \;\;\seq{X}{Y} \displayeq \seq{X'}{Y'} \;\;\display
\end{prooftree}
\end{array}\] \vspace{0.1cm}
\mbox{\bf Multiplicative logical rules:}
\[\begin{array}{c@{\hspace{0.6cm}}c@{\hspace{0.6cm}}c@{\hspace{0.6cm}}c}
\begin{prooftree}
\seq{\aemp}{X}
\justifies
\seq{\true}{X}
\using \truel
\end{prooftree}
&
\begin{prooftree}
\phantom{X}
\justifies
\seq{\aemp}{\true}
\using \truer
\end{prooftree}
&
\begin{prooftree}
\seq{F ; G}{X}
\justifies
\seq{F \conj G}{X}
\using \andl
\end{prooftree}
&
\begin{prooftree}
\seq{X}{F}
\phantom{w}
\seq{Y}{G}
\justifies \seq{X \aapp Y}{F \conj G}
\using \andr
\end{prooftree}
\\ & \\
\begin{prooftree}
\phantom{X}
\justifies
\seq{\false}{\aemp}
\using \falsel
\end{prooftree}
&
\begin{prooftree}
\seq{X}{\aemp}
\justifies
\seq{X}{\false}
\using \falser
\end{prooftree}
&
\begin{prooftree}
\seq{F}{X}
\phantom{w}
\seq{G}{Y}
\justifies
\seq{F \vee G}{X \aapp Y} \using \orl
\end{prooftree}
&
\begin{prooftree}
\seq{X}{F ; G} \justifies \seq{X}{F \vee G} \using \orr
\end{prooftree}
\\ & \\
\begin{prooftree}
\seq{\ainv F}{X} \justifies \seq{\neg F}{X} \using \negl
\end{prooftree}
&
\begin{prooftree}
\seq{X}{\ainv F} \justifies \seq{X}{\neg F} \using \negr
\end{prooftree}
&
\begin{prooftree}
\seq{X}{F}
\phantom{w}
\seq{G}{Y}
\justifies
\seq{F \implies G}{\ainv X \aapp Y} \using \impl
\end{prooftree}
&
\begin{prooftree}
\seq{X \aapp F}{G}
\justifies
\seq{X}{F \implies G}\using \impr
\end{prooftree}
\end{array}\]\vspace{0.1cm}
\mbox{\bf Additive logical rules:}
\[\begin{array}{c@{\hspace{0.6cm}}c@{\hspace{0.4cm}}c}
\begin{prooftree}
\phantom{X}
\justifies
\seq{\afalse}{X}
\using \afalsel
\end{prooftree}
&
\begin{prooftree}
\seq{F_i}{X}
\justifies
\seq{F_1 \aconj F_2}{X}
\using \;i \in \{1,2\}\,\aconjl
\end{prooftree}
&
\begin{prooftree}
\seq{F}{X}
\phantom{w}
\seq{G}{X}
\justifies
\seq{F \adisj G}{X} \using \adisjl
\end{prooftree}
\\ & \\
\begin{prooftree}
\phantom{X}
\justifies
\seq{X}{\atrue}
\using \atruer
\end{prooftree}
&
\begin{prooftree}
\seq{X}{F}
\phantom{w}
\seq{X}{G}
\justifies \seq{X}{F \aconj G}
\using \aconjr
\end{prooftree}
&
\begin{prooftree}
\seq{X}{F_i}
\justifies
\seq{X}{F_1 \adisj F_2}
\using \;i \in \{1,2\}\,\adisjr
\end{prooftree}
\end{array}\]\vspace{0.1cm}
\mbox{\bf Structural rules:}
\[\begin{array}{c@{\hspace{0.6cm}}c@{\hspace{0.6cm}}c@{\hspace{0.6cm}}c}
\begin{prooftree}
\seq{\aemp ; X}{Y} \justifies \seq{X}{Y} \using \aunitcl
\end{prooftree}
&
\begin{prooftree}
\seq{X}{Y ; \aemp} \justifies \seq{X}{Y} \using \aunitcr
\end{prooftree}
&
\begin{prooftree}
\seq{X}{Y} \justifies \seq{\aemp ; X}{Y} \using \aunitwl
\end{prooftree}
&
\begin{prooftree}
\seq{X}{Y} \justifies \seq{X}{Y ; \aemp} \using \aunitwr
\end{prooftree}
\\ & \\
\multicolumn{4}{c}{
\begin{prooftree}
\seq{(W ; X) ; Y}{Z}
\justifies
\seq{W ; (X ; Y)}{Z} \using \assoc
\end{prooftree}
\qquad
\begin{prooftree}
\seq{X}{Z}
\justifies
\seq{X ; Y}{Z}
\using \weak
\end{prooftree}
\qquad
\begin{prooftree}
\seq{X ; X}{Y}
\justifies
\seq{X}{Y}
\using \contr
\end{prooftree}}
\end{array}\]}\vspace{-0.4cm}
\caption{Display calculus proof rules.
In the display rule $\display$, the relation $\displayeq$
is the least equivalence containing the relation $\dpost$
given by the display postulates.
Note that our formalisation, and all our results,
omit the $\to$-connective and its rules.
\label{fig:rules}}
\end{figure}
% \end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "root"
%%% End:
%
\section{Interpolation for Display Calculi}
\label{sec:preliminaries}
% this section should be an intro to the system and work of Brotherston and
% Gor\'e
% Brotherston and Gor\'e \cite{brotherston-gore-interpolation}
% define a system containing structural connectives
% `;' (binary), `\#' (unary) and $\emptyset$ (nullary).
% They give display postulates sufficient to display any substructure;
% the form of their display postulates also builds in commutativity of `;`
% on both left and right of consecutions.
%
% They give structural rules for associativity, unit-weakening,
% unit-contraction, contraction and weakening.
% In our work the rules for associativity, unit-weakening and unit-contraction
% are often needed and we assume that they are always present.
In traditional sequent calculi, it is fairly straightforward to decorate each
rule with interpolants by building up the interpolant for the conclusion
sequent from the interpolants for the premise sequents.
This approach is harder in display calculi since the sequent $X \vdash Y$
goes through many transformations while displaying some substructure $Z$.
Brotherston \& Gor\'e therefore consider the following ``$\LADI$'' property
\cite[Definition~3.4]{brotherston-gore-interpolation}, where $\adeq$ is the equivalence obtained by
combining display equivalence with applications of associativity $\assoc$ if present in the calculus:
\begin{description}
\item[LADI:] a rule with premises $\mathcal C_i$ and conclusion
$\mathcal C$ satisfies the \underline{l}ocal $AD$
\underline{d}isplay \underline{i}nterpolation property ($\LADI$) if
for all premises $\mathcal C_i$, all sequents $\mathcal
C_i'$ such that $\mathcal C_i' \adeq \mathcal C_i$ satisfy the
interpolation property, then all sequents $\mathcal C'$ such that
$\mathcal C' \adeq \mathcal C$ satisfy the interpolation
property.
\end{description}
%\marginpar{Have we yet defined $\equiv_{AD}$?}
Although Brotherston and Gor\'e \cite{brotherston-gore-interpolation}
give the separate variants of the logical connectives
$\top, \bot, \land$ and $\lor$ for the
additive and multiplicative forms of the logical introduction rules,
we just use one connective for each of $\top, \bot, \land$ and $\lor$.
Although the additive and multiplicative forms are equivalent in the presence
of contraction and weakening,
\cite{brotherston-gore-interpolation} contains results which
are relevant to the situation where not all structural rules are included.
Thus they prove results for both the rules shown below,
even though the second rule is much easier to deal with.
$$
\frac{ X \vdash A \quad Y \vdash B}
{X, Y \vdash A \land B}
\qquad\qquad\qquad
\frac{ X \vdash A \quad X \vdash B}
{X \vdash A \land B}
$$
%In the work described here,
We first considered the second (additive) rule
shown; we subsequently developed a proof dealing with the first
(multiplicative) rule directly.
\section{The Isabelle mechanisation}
Our mechanisation builds on the work of Dawson \& Gor\'e
\cite{dawson-gore-cut-admissibility} in formalising Display Logic.
Some of our notation and choices of properties, lemmas, etc., are attributable
to this. In particular, we use \texttt{Comma}, \texttt{Star} and \texttt{I}
for `;', `\#' and `$\emptyset$'.
The work in \cite{dawson-gore-cut-admissibility} is a deep embedding of rules
and of the variables in them, and we have followed that approach here
(see \cite{dawson-gore-gls} for our understanding of what this means,
and for more details).
That is, we define a language of formulae and structures, which
contains explicit structure and formula variables, for which we define explicit
substitution functions.
We also define the rules as specific data structures
(of which there is a small finite number,
such as those in Figure~\ref{fig:rules}),
and infinitely many substitution instances of these rules.
\subsection{Formalising Display Logic in Isabelle}
An actual derivation in a Display Calculus involves structures
containing formulae which are composed of primitive propositions
(which we typically represent by $p,q,r$). It uses rules which are
\emph{expressed} using structure and formula variables, typically
$X,Y,Z$ and $A,B,C$ respectively, to represent structures and formulae
made up from primitive propositions. We are using a ``deep embedding'' of variables, so
our Isabelle formulation explicitly represents variables such as
$X,Y,Z$ and $A,B,C$, and defines substitution for them of
given structures and formulae, which may themselves contain variables.
Thus, in our Isabelle formulation we use
\texttt{PP} \textit{name},
\texttt{SV} \textit{name} and \texttt{FV} \textit{name} to represent
propositional, structure and formula variables respectively.
The operator \texttt{Structform} ``casts'' a formula into a structure.
We can then give recursive datatypes
\texttt{formula} and \texttt{structr} for formulas and structures respectively
(possibly parameterised by formula or structure variables),
in the obvious way.
Thus the datatype \texttt{formula} for formulae has constructors
\texttt{FV}, \texttt{PP} and the logical operators $\conj ~ \vee ~
\neg ~ \true ~ \false$
whereas the datatype \texttt{structr} for structures has constructors
\texttt{SV}, \texttt{Structform}
and the structure operators $\aapp ~ \ainv ~ \aemp$.
%We define types
%The constructor \texttt{PP} represents a primitive proposition variable $p$.
%Formulae are therefore represented by the datatype
%below:
%% in Figure~\ref{fig:isabelle-formulae}.
%\begin{verbatim}
%datatype formula =
% Btimes formula formula ("_ && _" [68,68] 67)
% | Bplus formula formula ("_ v _" [64,64] 63)
% | Bneg formula ("--_" [70] 70)
% | Btrue ("T")
% | Bfalse("F")
% | FV string
% | PP string
%\end{verbatim}
%
%Structures are represented by the datatype below:
%\begin{verbatim}
%datatype structr =
% Comma structr structr
% | Star structr
% | I
% | Structform formula
% | SV string
%\end{verbatim}
%The operator \texttt{Structform} ``casts'' a formula into
%a structure, since a formula is a special case of a structure.
%The notation in parentheses in the definition of datatype
%\texttt{formula}
%%Figure~\ref{fig:isabelle-formulae}
%describe an alternative infix syntax, closer to normal logical
%syntax. Some complex manipulation of the syntax, available
%through Isabelle's ``parse translations'' and ``print translations'',
%allows structure variables and constants to be prefixed by the symbol
%\texttt{\$}, and the notations \texttt{FV}, \texttt{SV} and
%\texttt{Structform} to be omitted.
A rule (type \texttt{'a psc})
is represented as a list of premises and a conclusion,
and a sequent by an Isabelle/HOL datatype:
\begin{verbatim}
types 'a psc = "'a list * 'a"
datatype sequent = Sequent structr structr
\end{verbatim}
A sequent \texttt{(Sequent X Y)} can also be represented as
\texttt{\$X |- \$Y}.
%Thus the term \texttt{Sequent (SV ''X'')
% (Structform (FV ''A''))} is printed, and may be entered, as
%\texttt{(\$''X'' |- ''A'')}.
Since a ``deep'' embedding requires handling substitution explicitly,
we defined functions to substitute for structure and
formula variables, in structures, sequents and rules.
In particular, we have an operator \texttt{rulefs},
where \texttt{rulefs} \textit{rules} is the set of substitution instances of
rules in the set \textit{rules}.
Also, when we refer to derivability using a set of rules,
this allows inferences using substitution instances of these rules,
and \texttt{derivableR} \textit{rules sequents} means the set of sequents which
can be derived from \textit{sequents} using \textit{rules}, instantiated.
\begin{verbatim}
derivableR :: "rule set => sequent set => sequent set"
rulefs :: "rule set => rule set"
\end{verbatim}
We also use some general functions to describe derivability.
An inference rule of type \texttt{'a psc} is a list \texttt{ps} of premises
and a conclusion \texttt{c}. Then
\texttt{derl rls} is the set of
rules derivable from the rule set \texttt{rls} while
\texttt{derrec rls prems}
is the set of sequents derivable using rules \texttt{rls}
from the set \texttt{prems} of premises.
We defined these using Isabelle's package for inductively defined sets.
A more detailed expository account of these, with many useful lemmas,
is given in \cite{gore-india}.
\begin{verbatim}
derl :: "'a psc set => 'a psc set"
derrec :: "'a psc set => 'a set => 'a set"
\end{verbatim}
Note that these functions do not envisage instantiation of rules.
Thus we have the following relationship
between \texttt{derivableR} and \texttt{derrec}.
\begin{verbatim}
"derivableR ?rules == derrec (rulefs ?rules)"
\end{verbatim}
The ``deep embedding'' approach to rules enables us to express
properties of rules, such as that ``no structure variable appears in
both antecedent and succedent positions''. Such lemmas apply to all
display postulates satisfying conditions of this sort. We used this
in \cite{dawson-gore-cut-admissibility} in showing that
cut-admissibility applies whenever the structural rules were all of a
particular form (as in Belnap's cut elimination theorem). With regard
to interpolation, possible future work may include showing that
interpolation results hold whenever rules are of a particular form,
but our present work (except for some lemmas) does not do this.
The work in \cite{dawson-gore-cut-admissibility} is also a deep embedding of
proofs (where we took proof objects and explicitly manipulated them) but we
have \emph{not} done that here.
\subsection{Definitions relating to interpolation} \label{s-defs}
We define the following sets of rules:
\begin{description}
\item[\rm \texttt{dps}:] is the set of six display postulates
in~\cite[Definition 2.5]{brotherston-gore-interpolation}
(Display-equivalence);
\item[\rm \texttt{aidps}:] is \texttt{dps}, their inverses, and the
associativity rule (ie, 13 rules);
\item[\rm \texttt{ilrules}:] is the unit-contraction and unit-weakening
rules;
\item[\rm \texttt{rlscf}:] is the set of all rules of the logic as
shown in \cite[Figures 1 and 3]{brotherston-gore-interpolation},
plus \texttt{aidps};
that is, the rules of Figure~\ref{fig:rules}, except the
additive logical rules
(%to simplify the task,
and we omit throughout this work the derivable rules for implication $\to$);
\item[\rm \texttt{rlscf\_nw}:] is as \texttt{rlscf}, but excluding the
weakening rule.
\end{description}
\begin{definition}
We define several predicates to do with interpolation:
\begin{verbatim}
interp :: "rule set => sequent => formula => bool"
edi :: "rule set => rule set => sequent => bool"
ldi :: "rule set => rule set => sequent list * sequent => bool"
cldi :: "rule set => rule set => sequent list * sequent => bool"
\end{verbatim}
\begin{description}
\item[\rm \texttt{interp} \texttt{rules} $(X \vdash Y)$
\texttt{intp}:] iff \texttt{intp} is an interpolant for $X
\vdash Y$. Thus $X \vdash \mathtt{intp}$ and $\mathtt{intp} \vdash
Y$ are derivable using \texttt{rules} and the (formula) variables
in \texttt{intp} are among the formula variables of the structures $X$ and
$Y$;
\item[\texttt{edi} \texttt{lrules drules} $(X \vdash Y)$:]
(\emph{Extended Display Interpolation}) iff for all sequents
$X' \vdash Y'$ from which $X \vdash Y$ is derivable using
\texttt{lrules}, the sequent $X' \vdash Y'$ has an interpolant
defined in terms of derivability using \texttt{drules} where
\texttt{lrules} would typically be a set of display postulates;
\item[\texttt{ldi} \textit{lrules drules} $(ps, c)$:] (\emph{Local
Display Interpolation}) iff the rule $(ps, c)$ preserves the
property \texttt{edi}: that is, if, for all $p \in ps$, \texttt{edi}
\texttt{lrules drules} $p$ holds, then \texttt{edi} \texttt{lrules
drules} $c$ holds. Thus, if \texttt{lrules} is the set $AD$ of
rules (our \texttt{aidps}), and \texttt{drules} is the set of rules
of the logic, then the LADI-property~\cite[Definition
3.4]{brotherston-gore-interpolation} for rule $(ps, c)$ is
\texttt{ldi aidps drules} $(ps, c)$.
\end{description}
\end{definition}
Note that none of these definitions involves a condition that $X
\vdash Y$ be derivable. Of course, cut-admissibility would imply that
if $X \vdash Y$ has an interpolant then $X \vdash Y$ is derivable, but
we avoid proving or using cut-admissibility. Even so, in most cases
we do not need such a condition. However we do need $X \vdash Y$ in
the case of a sequent $I \vdash Y, \#X$ produced by weakening and
displaying $I$. Thus we need a predicate with that condition:
\begin{definition}[Conditional Local Display Interpolation] \\
\texttt{cldi} \texttt{lrules drules}
$(ps, c)$ holds iff: \\
if $c$ is derivable using \texttt{drules}, then
\texttt{ldi} \texttt{lrules drules} $(ps, c)$ holds.
\end{definition}
We also need variants \texttt{interpn}, \texttt{edin}, \texttt{ldin}
and \texttt{cldin}, of these predicates, where the derivation of
interpolated sequents is from a given set of rules, rather than from
given rules and their substitution instances. We use these in lemmas
which involve rule sets which are not closed under substitution.
We mention here that many of our lemmas about these properties
assume, although we do not specifically say so, that $AD$ (rule set
\texttt{aidps}) is used as $lrules$ in the above definitions,
and that the derivation rules, $drules$ in the above definitions,
contain the $AD$ rules.
Lemma 3.5 of \cite{brotherston-gore-interpolation} says that if all
rules satisfy the local $AD$-interpolation property, then the calculus
has the interpolation property. In fact the stronger result,
Lemma~\ref{ldi-derl-cldi-ei}\ref{ldi-derl} (below) is true, that LADI is
preserved under derivation. But for the conditional local display
interpolation property, a result analogous to the first-mentioned,
only, of these results holds: see
Lemma~\ref{ldi-derl-cldi-ei}\ref{cldi-ex-interp}.
\begin{lemma}[\texttt{ldi\_derl, cldi\_ex\_interp}] \label{ldi-derl-cldi-ei}
\begin{enumerate}
\item \label{ldi-derl}
if each rule from a set of rules
satisfies the local display interpolation property,
then so does a rule derived from them;
\item \label{cldi-ex-interp}
if all the derivation rules satisfy the
conditional local $AD$-interpolation property,
then the calculus has the interpolation property.
\end{enumerate}
\end{lemma}
\subsection{Substitution of congruent occurrences} \label{s-cong}
In \cite[Lemmas 3.6, 3.7]{brotherston-gore-interpolation}
the concept of congruent occurrences of some structure $Z$ is used,
with substitution for such congruent occurrences.
Where two sequents $\mathcal C$ and $\mathcal C'$
are related by a display postulate, or sequence of them,
a particular occurrence of $Z$ in $\mathcal C$ will correspond to
a particular occurrence of $Z$ in $\mathcal C'$,
according to the sequence of display postulates used to obtain
$\mathcal C'$ from $\mathcal C$.
This concept looked rather difficult to define and express precisely
and formally: note that in the notation in
\cite{brotherston-gore-interpolation}, $\mathcal C[Z/A] \equiv_{AD}
\mathcal C'[Z/A]$, the meanings of $\mathcal C[Z/A]$ and $\mathcal
C'[Z/A]$ depend on each other, because they refer to particular, corresponding,
instances of $A$ in $\mathcal C$ and $\mathcal C'$.
So we adopted the alternative approach, used successfully in
\cite{dawson-gore-cut-admissibility}:
rather than trying to define $\mathcal C'[Z/A]$ we would prove
that there exists a sequent (call it $\mathcal C'_{Z/A}$)
satisfying $\mathcal C[Z/A] \equiv_{AD} \mathcal C'_{Z/A}$
and satisfying the property that some occurrences of $A$ in $\mathcal C'$
are replaced by $Z$ in $\mathcal C'_{Z/A}$.
This approach turned out to be sufficient for all the proofs discussed here.
In previous work~\cite{dawson-gore-cut-admissibility},
we defined and used a relation \texttt{seqrep},
defined as follows.
\begin{definition}[\texttt{seqrep}]\label{d-seqrep}
\begin{verbatim}
seqrep : "bool => structr => structr => (sequent * sequent) set"
\end{verbatim}
$(U, V) \in \texttt{seqrep}\ b\ X\ Y$ means that
some (or all or none) of the occurrences of $X$ in $U$ are replaced by $Y$,
to give $V$; otherwise $U$ and $V$ are the same;
the occurrences of $X$ which are replaced by $Y$ must all be in succedent
or antecedent position according to whether $b$ is true or false.
\end{definition}
For this we write $U\ ^X\!\!\leadsto^Y V$,
where the appropriate value of $b$ is understood.
Analogous to \cite[Lemma~3.9]{brotherston-gore-interpolation}
we proved the following result.
\begin{lemma}[\texttt{SF\_some\_sub}] \label{SF-some-sub}
% For a formula $F$ and structure $Z$, and a set of \texttt{rules},
For formula $F$, structure $Z$, and rule set \texttt{rules},
\footnote{In Lemma 3.9~\cite{brotherston-gore-interpolation}
this set of rules is the $AD$ rules}
if
\begin{enumerate}
\item the conclusions of \texttt{rules} do not contain formulae; and
\item the conclusion of a rule in \texttt{rules}
does not contain more than one occurrence of any structure variable; and
\item the \texttt{rules} obey Belnap's $C4$ condition: when the
conclusion and a premise of a rule both contain a structure
variable, then both occurrences are in antecedent or both are in
succedent positions; and
\item \texttt{concl} is derivable from \texttt{prems} using
\texttt{rules}; and
\item \texttt{concl} $^F\!\!\leadsto^Z$ \texttt{sconcl}
\end{enumerate}
then there exists a list \texttt{sprems} (of the same length as \texttt{prems})
such that
\renewcommand\theenumi{\arabic{enumi}}
\renewcommand\labelenumi{(\theenumi)}
\begin{enumerate}
\item \texttt{sconcl} is derivable from \texttt{sprems} using
\texttt{rules}; and
\item \texttt{prem$_n$} $^F\!\!\leadsto^Z$ \texttt{sprem$_n$} holds
for corresponding members \texttt{prem$_n$} of \texttt{prems} and
\texttt{sprem$_n$} of \texttt{sprems}.
\end{enumerate}
\end{lemma}
% \begin{itemize}
% \item their conclusions do not contain formulae
% \item their structure variables are distinct
% \item (Belnap's $C4$ condition)
% when the conclusion and a premise of a rule both contain a structure variable,
% then both occurrences are in antecedent or both are in succedent positions
% \end{itemize}
% (in Lemma 3.9 this sset of rules is the $AD$ rules).
% \begin{lemma}[\texttt{SF\_some\_sub}] \label{SF-some-sub}
% Where, for a formula $F$ and structure $Z$,
% \begin{itemize}
% \item ``derivable'' means using a set of rules satisfying the conditions above
% \item if \texttt{concl} is derivable from \texttt{prems}
% \item if \texttt{concl} $^F\!\!\leadsto^Z$ \texttt{sconcl}
% \end{itemize}
% then there exists a list \texttt{sprems} such that
% \begin{itemize}
% \item \texttt{sconcl} is derivable from \texttt{sprems}
% \item if \texttt{prem$_n$} and \texttt{sprem$_n$}
% are corresponding members of \texttt{prems} and \texttt{sprems},
% then \texttt{prem$_n$} $^F\!\!\leadsto^Z$ \texttt{sprem$_n$}
% \end{itemize}
% \end{lemma}
% Note that although we will use this where \texttt{rules} is a set of
% display postulates, this theorem does not require that the rules have
% only a single premise.
% The proof of this result used some results proved previously for the
% cut-elimination work \cite{dawson-gore-cut-admissibility}, notably
% \texttt{extSubs}, which gives, essentially, a constructive expression
% for \texttt{sprems}.
\subsection{$\LADI$ property for unary logical rules} \label{s-unary}
Proposition 3.10 of \cite{brotherston-gore-interpolation}
covers the display postulates, the associativity rule,
and the nullary or unary logical introduction rules.
The first case ($(\equiv_D)$, that is, any sequence of display postulates) of
\cite[Proposition 3.10]{brotherston-gore-interpolation}
is covered by the following result (which holds independently of the choice of
set of derivation rules).
\begin{lemma}[\texttt{bi\_lrule\_ldi\_lem}] \label{l-dp-ldi}
Let rule $\rho$ be a substitution instance of a rule in $AD$.
Then $\rho$ has the LADI property.
\end{lemma}
With the next lemma we can handle
the rules $(Id)$, $(\top R)$ and $(\bot L)$. % of Proposition 3.10.
% would be trivial if it were true that nothing else is
% display-equivalent to their conclusions;
% this is not so, but we can use this lemma:
\begin{lemma}[\texttt{non\_bin\_lem\_gen}] \label{non-bin-lem-gen}
Assume the derivation rules include $\negl$ and $\negr$.
Let $\rho$ be a substitution instance of a rule in $AD$
whose premise does not contain any `;'.
If the premise of $\rho$ has an interpolant then so does its conclusion.
\end{lemma}
Since the conclusions of the three nullary rules
$(Id)$, $(\top R)$ and $(\bot L)$ clearly themselves have interpolants,
Lemma~\ref{non-bin-lem-gen} shows they satisfy the extended display
interpolation property, and so the rules have the LADI property.
\begin{proposition}
The rules $(Id)$, $(\top R)$ and $(\bot L)$ satisfy the LADI property.
\end{proposition}
% Recall that \texttt{aidps} is the set of all $AD$ rules,
% and \texttt{rlscf} is the set of all rules of the calculus.
The remaining cases of Proposition 3.10 are the logical introduction rules with
a single premise.
%
For these we use the four lemmas (of which one is shown)
\begin{lemma}[\texttt{sdA1}] \label{l-sdA1}
if the rule shown below left is
a logical introduction rule, and the condition in the middle holds,
then the rule shown below right
is derivable (ie, using $AD$ and the logical introduction rules)
$$\displaystyle \frac{Y' \vdash U}{Y \vdash U}
\qquad
W\ ^Y\!\!\leadsto^{Y'} W'
\qquad
\displaystyle \frac{W' \vdash Z}{W \vdash Z}$$
\end{lemma}
% \begin{lemma}[\texttt{sdA1}] \label{l-sdA1}
% if $\displaystyle \frac{Y' \vdash U}{Y \vdash U}$
% is a logical introduction rule, and $W\ ^Y\!\!\leadsto^{Y'} W'$,
% then $\displaystyle \frac{W' \vdash Z}{W \vdash Z}$
% is derivable (ie, using $AD$ and the logical introduction rules)
% \end{lemma}
Then from these lemmas we get
\begin{lemma}[\texttt{seqrep\_interpA}] \label{seqrep-interpA}
For the logical introduction rule shown below left,
if formula variables in $Y'$ also appear in $Y$,
the condition on the right holds,
and $I$ is an interpolant for $W' \vdash Z'$,
then $I$ is also an interpolant for $W \vdash Z$:
\[\displaystyle \frac{Y' \vdash U}{Y \vdash U}
\qquad
\qquad
W \vdash Z \ ^Y\!\!\!\leadsto^{Y'} W' \vdash Z'
\mbox{ (in antecedent positions) }
\]
\end{lemma}
% \begin{lemma}[\texttt{seqrep\_interpA}] \label{seqrep-interpA}
% if $\displaystyle \frac{Y' \vdash U}{Y \vdash U}$
% is a logical introduction rule,
% formula variables in $Y'$ also appear in $Y$,
% $W \vdash Z \ ^Y\!\!\!\leadsto^{Y'} W' \vdash Z'$ (in antecedent positions),
% and $I$ is an interpolant for $W' \vdash Z'$,
% then $I$ is also an interpolant for $W \vdash Z$
% \end{lemma}
Finally we get the following result which gives Proposition 3.10 for single
premise logical introduction rules (additive or multiplicative).
\begin{proposition}[\texttt{logA\_ldi}]
if $F$ is a formula, and the rule $(F\vdash)$ below is a logical introduction
rule, and the formula variables in $Y$ are also in $F$, then $(F\vdash)$
satisfies the LADI property:
\[
\displaystyle \frac{Y \vdash U}{F \vdash U}(F\vdash)
\]
\end{proposition}
% \begin{proposition}[\texttt{logA\_ldi}]
% if $\displaystyle \frac{Y \vdash U}{F \vdash U}$ ($F$ a formula)
% is a logical introduction rule
% where the formula variables in $Y$ are also in $F$,
% then that rule satisfies the LADI property
% \end{proposition}
This last result requires Lemma~\ref{SF-some-sub}.
% the use of \texttt{SF\_some\_sub} (above).
We have analogous results for a logical introduction rule
for a formula on the right.
% are \texttt{seqrep\_interpS} and \texttt{logS\_ldi}.
%\subsection{Structural Rules} \label{s-struct}
\begin{remark}
\label{remark}
At this stage, we have a general method for proving local display
interpolation for a given rule $\rho$, with premises $ps_\rho$ and conclusion
$c_\rho$: identify a relation $rel$ such that
\begin{enumerate}
\item \label{r1} $(ps_\rho, c_\rho) \in rel$
\item \label{r2} whenever $c \equiv_{AD} c_\rho$, we can find a list
$ps$ (often got from sequents in $ps_\rho$ using the
same sequence of display postulates which get $c$ from $c_\rho$)
such that $(ps, c) \in rel$, and $p \equiv_{AD} p_\rho$
for each $p \in ps$ and corresponding $p_\rho \in ps_\rho$
\item \label{r3} whenever $(ps, c) \in rel$, $c$ is derivable from $ps$
(not used except to prove \ref{r4})
\item \label{r4} whenever $(ps, c) \in rel$,
and each $p \in ps$ has an interpolant, then $c$ has an interpolant
(proof of this will normally use \ref{r3})
\end{enumerate}
\end{remark}
\subsection{$\LADI$ property for (unit) contraction}
\label{s-ldi-uc-ctr}
This is relatively easy for the unit-contraction rule:
the relation $rel$ is given by:
$(p, c) \in rel$ if $p$ is obtained from $c$ by deleting,
somewhere in $c$, some $\#^n\emptyset$,
and we get \ref{r2} using roughly the same
sequence of display postulates.
% We proved the local display interpolation property for the unit contraction
% rules and for the contraction rule.
\begin{lemma}[\texttt{ex\_box\_uc}]
if sequent $Cd$ is obtained from $C$ by deleting one occurrence of some
$\#^n \emptyset$, and if $Cd' \rightarrow_{AD}^* Cd$, then there exists
$C'$, such that $C' \rightarrow_{AD}^* C$, and
$Cd'$ is obtained from $C'$ by deleting one occurrence of $\#^n \emptyset$.
\end{lemma}
% We accidentally made the error of first proving
% \texttt{ex\_box\_uc'} (which swaps $C(Cd)$ with $C'(Cd')$)
% but subsequently discovered this was easier to prove anyway.
% But to fix the error we also needed \texttt{inv\_der\_aidps}.
The proof of this required a good deal of programming repetitive use
of complex tactics similar to (but less complex than)
those described in \S\ref{s-del}.
The following lemma gives \ref{r3} of the general proof method above.
\begin{lemma}[\texttt{delI\_der}]
If $(p, c) \in rel$ (defined above), and if the derivation rules include
$AD$ and the unit contraction rules, then $c$ is derivable from $p$
\end{lemma}
\begin{proposition}[\texttt{ldi\_ila, ldi\_ils}]
The unit contraction rules satisfy LADI. % the LADI property.
\end{proposition}
For the case of contraction, we defined a relation \texttt{mseqctr}:
$(C, C') \in \texttt{mseqctr}$ means that
$C'$ is obtained from $C$, by contraction of
substructures $(X,X)$ to $X$.
Contractions may occur (of different substructures) in several places or
none.
\begin{lemma}[\texttt{ex\_box\_ctr}]
if sequent $Cd$ is obtained from $C$ by contraction(s) of substructure(s),
and if $Cd' \rightarrow_{AD}^* Cd$, then there exists
$C'$, such that $C' \rightarrow_{AD}^* C$, and
$Cd'$ is obtained from $C'$ by substructure contraction(s).
\end{lemma}
\begin{proof}
The proof of \texttt{ex\_box\_ctr} is a little more complex than
that for unit-contraction, because (for example) when
$X; Y \vdash Z \equiv_{AD} X \vdash Z; \#Y$, and $X; Y \vdash Z$ is
obtained by contracting $(X; Y) ; (X; Y) \vdash Z$, we need to show
$(X; Y) ; (X; Y) \vdash Z \equiv_{AD} X;X \vdash Z; \#(Y;Y)$
\end{proof}
\begin{lemma}[\texttt{ctr\_der}]
If $(p, c) \in \texttt{mseqctr}$ (defined above),
and if the derivation rules include
$AD$ and the left contraction rule, then $c$ is derivable from $p$
\end{lemma}
\begin{proposition}[\texttt{ldi\_cA}]
The left contraction rule satisfies the LADI property.
\end{proposition}
% (Note that the system of \cite{brotherston-gore-interpolation}
% does not give a rule for contraction on the right,
% it is derived from the rule for contraction on the left).
\subsection{Deletion Lemma (\cite{brotherston-gore-interpolation}, Lemma 4.2)}
\label{s-del}
For weakening or unit-weakening, it is more difficult:
a sequence of display postulates applied to the conclusion
$X; \emptyset \vdash Y$ may give $\emptyset \vdash Y; \# X$,
so the same or similar sequence cannot be applied to the premise $X \vdash Y$.
For this situation we need Lemma 4.2 (Deletion Lemma)
of \cite{brotherston-gore-interpolation}:
this result says that for
$F$ a formula sub-structure occurrence in $C$, or $F = \emptyset$,
and $C \rightarrow_{AD}^* C'$,
then (in the usual case) $C \setminus F \rightarrow_{AD}^* C' \setminus F$,
where $C \setminus F$ and $C' \setminus F$ mean
deleting only particular occurrence(s) of $F$ in $C$,
and deleting the \emph{congruent}
(corresponding) occurrence(s) of $F$ in $C'$,
where congruence is determined
by the course of the derivation of $C'$ from $C$.
We did not define congruent occurrences in this sense:
see the general discussion of this issue in \S\ref{s-cong}.
It seemed easier to define and use the relation \texttt{seqdel}:
\begin{definition}[\texttt{seqdel}]
Define $(C, C') \in \texttt{seqdel} ~\textit{Fs}$ to mean that $C'$
is obtained from $C$ by deleting one occurrence in $C$ of a
structure in the set \textit{Fs}.
\end{definition}
Then we proved the following result about deletion of a formula:
\begin{lemma}[\texttt{deletion}]
Let $F$ be a formula or $F = \emptyset$.
If sequent $Cd$ is obtained from $C$ by deleting an occurrence of some $\#^iF$,
and if $C \rightarrow_{AD}^* C'$, then either
\begin{enumerate}
\item
there exists $Cd'$, such that $Cd \rightarrow_{AD}^* Cd'$, and
$Cd'$ is obtained from $C'$ by deleting an occurrence of some $\#^jF$, or
\item
$C'$ is of the form $\#^nF \vdash \#^m (Z_1 ; Z_2)$ or
$\#^m (Z_1 ; Z_2) \vdash \#^nF$, where
$Cd \rightarrow_{AD}^* (Z_1 \vdash \# Z_2)$, or
$Cd \rightarrow_{AD}^* (\# Z_1 \vdash Z_2)$
\end{enumerate}
\end{lemma}
\begin{proof}
Thus the premise is that $Cd$ is got from $C$ by deleting
instance(s) of the substructure formula $F$, possibly with some \# symbols.
%
The main clause of the result says that there exists $Cd'$
(this corresponds to $C' \setminus F$ in \cite{brotherston-gore-interpolation})
which is got from $Cd$ by deleting instance(s) of $\#^nF$ (for some $n$),
but there is also an exceptional case where $\#^nF$ is alone
on one side of the sequent.
The proof of this result required considerable ML programming of proof tactics.
% The file \texttt{Del.ML} is devoted to these tactics and the intermediate
% results proved for the proof of Lemma 4.2.
When we get cases as to the last rule used in the derivation
$C \rightarrow_{AD}^* C'$, this gives 13 possibilities.
For each rule there are two main cases for the shape of the sequent
after the preceding rule applications:
in the first, $\#^nF$ appears in $\#^nF, Z$ or $Z, \#^nF$
and so could be deleted ($F$ is ``delible''), and
in the second,
the relevant occurrence of $\#^nF$ is the whole of one side of the sequent.
Then where, in the case of the associativity rule for example,
the sequent which is $(X; Y) ; Z \vdash W$ (instantiated) has $F$ delible,
$\#^nF$ may be equal to $X,Y$ or $Z$, or may be delible from $X,Y,Z$ or $W$.
% (Certain further cases, such as that $\#^nF$ is $(X ; Y)$,
% get eliminated automatically using results such as
% \texttt{stars\_Sf\_not\_Comma}, below).
% The tactics \texttt{dvitacs} have been written to provide one set of tactics
% which handle all these cases.
% The key component is the recursive tactic \texttt{sdvitac}
% which searches for a way of showing that $F$ is delible from a given sequent
% (say $X; (Y ; Z) \vdash W$, in the above example).
Without the possibility of programming a tactic in Standard ML
to deal with all these possibilities, each of these seven cases,
and a similar (less numerous) set of cases for each of the
other 12 rules, would require its own separate proof.
For the second case, where $\#^nF$ is equal to one side of the sequent
($W$ in the above example),
a variety of tactics is required:
for those display rules which move the comma from one side to the other
% the tactics \texttt{mdiatacs} works for all,
one function works for all,
but the other cases have to be proved individually.
%
% The proof threw up a number of (logically)
% trivial cases which nonetheless needed particular results to be included
% as lemmas to be used automatically in simplification, such as
% that we cannot have $X; Y = \#^n F$, for $F$ a formula.
\qed
\end{proof}
We then proved this result for $F = \emptyset$ instead of a formula,
to give a theorem \texttt{deletion\_I};
the changes required in the proof were trivial.
\subsection{$\LADI$ property for (unit) weakening rules}
\label{s-ldi-wk}
To handle weakening in a similar way, we considered two separate rules,
one to weaken with instances of $\#^n \emptyset$ and one
to change any instance of $\emptyset$ to any formula.
Thus, where $Y_\emptyset$ means a structure like $Y$ but with every formula or
structure variable in it changed to $\emptyset$, a weakening is produced as
shown:
$$ X \vdash Z \Longrightarrow
X, Y_\emptyset \vdash Z \Longrightarrow
X, Y \vdash Z$$
% \vspace{-3ex}
% \begin{center}
% \bpf
% \A "$X \vdash Z$"
% \U "$X, Y_\emptyset \vdash Z$"
% \U "$X, Y \vdash Z$"
% \epf
% \end{center}
We first consider the second of these,
replacing any instance of $\emptyset$ with a structural atom,
that is, a formula or a structure variable
which are atomic so far as the structure language is concerned.
We use the relation \texttt{seqrepI str\_atoms}:
$(c, p) \in \texttt{seqrepI str\_atoms}$ means that some occurrences of
$\emptyset$ in $p$ are changed to structural atoms in $c$.
% Note that \texttt{$(C, Cd) \in $ seqrepI $\Fs$}
% means that some occurrences in $C$
% of structures in $\Fs$ are replaced by $I$ in $Cd$.
\begin{lemma}[\texttt{ex\_box\_repI\_atoms}]
If sequent $C$ is obtained from $Cd$ by replacing $\emptyset$ by structural
atoms,
and if $C' \rightarrow_{AD}^* C$, then there exists
$Cd'$, such that $Cd' \rightarrow_{AD}^* Cd$, and
$C'$ is obtained from $Cd'$ by replacing $\emptyset$ by structural
atoms.
\end{lemma}
For this relation, property \ref{r2} was quite easy to prove,
since exactly the same sequence of $AD$-rules can be used.
We proved that there are derived rules permitting replacing instances of
$\emptyset$ by anything, and this gave us that such rules, where the
replacement structure is a formula or structure variable, have
the local display interpolation property.
\begin{lemma}[\texttt{seqrepI\_der}]
If the derivation rules include weakening and unit-contraction,
and $(c, p) \in \texttt{seqrepI}\ \Fs$,
ie some occurrences of $\emptyset$ in $p$ are
replaced by anything to give $c$, then $c$ is derivable from $p$.
\end{lemma}
The next lemma gives the LADI property, not for a rule of the system,
but for inferences $([p], c)$ where $(c, p) \in \texttt{seqrepI str\_atoms}$.
\begin{proposition}[\texttt{ldi\_repI\_atoms}] \label{ldi-repI-atoms}
Where $(c, p) \in \texttt{seqrepI str\_atoms}$,
ie some occurrences of $\emptyset$ in $p$ are
replaced by structural atoms to give $c$,
$([p], c)$ has the LADI property.
\end{proposition}
Next we consider the structural rules allowing insertion of $\#^n \emptyset$.
We use the variant of the theorem \texttt{deletion} (see \S\ref{s-del})
which applies to deletion of $\emptyset$ rather than of a formula.
Then we show that % the result of
inserting occurrences of anything preserves derivability.
% although we need it only for inserting occurrences $\#^n \emptyset$.
\begin{lemma}[\texttt{seqwk\_der}]
If the derivation rules include weakening,
and $(c, p) \in \texttt{seqdel}\ \Fs$,
ie, $c$ is obtained from $p$ by weakening,
then $c$ is derivable from $p$.
\end{lemma}
Then we need the result that such rules satisfy the local display interpolation
property.
In this case, though, where a sequent containing $\emptyset$
is rearranged by the display postulates such that the $\emptyset$
is alone on one side (such as where
$X \vdash Y; \emptyset$ is rearranged to $X; \#Y \vdash \emptyset$),
then to prove the LADI property requires using the derivability
of $X \vdash Y$ rather than the fact that $X \vdash Y$ satisfies LADI.
Thus we can prove only the conditional local display interpolation property.
\begin{proposition}[\texttt{ldi\_wkI\_alt}] \label{ldi-wkI}
% Provided the conclusion is derivable,
If the derivation rules include unit weakening, unit contraction,
and the left and right introduction rules for $\top$ and $\bot$,
% why all of these - compare ldi_wkI ??
then a rule for $\#^n\emptyset$-weakening
(ie, inserting $\#^n\emptyset$)
satisfies the conditional LADI property.
\end{proposition}
% \subsection{Completing the proof for the system with weakening}
% following material doesn't belong under the previous subsection heading
% but adding a new heading goes over 15 pages
At this point we also proved that the additive forms of the binary
logical introduction rules satisfy the LADI property.
The proofs are conceptually similar to those for the unary
logical introduction rules --- but more complex where single
structures/sequents become lists of these entities.
For reasons of space, and because we proceed to deal with the
more difficult multiplicative forms of the binary
introduction rules, we give the details in Appendix~\S\ref{s-lair2}.
Now we can give the result for a system which contains
weakening, contraction, and the binary rules in either additive or
multiplicative form.
To get this we define a set of rules called \texttt{ldi\_rules\_a},
which contains the additive binary logical rules,
and does not contain the weakening rules
but does contain the relations of Propositions~\ref{ldi-wkI} and
\ref{ldi-repI-atoms}.
We have that all of its rules satisfy the conditional LADI property,
so the system has interpolants.
We show this gives a deductive system equivalent to the given set of rules
\texttt{rlscf}, which system therefore also has interpolants.
Details are similar to the derivation of Theorem~\ref{rlscf-nw-interp}.
\begin{theorem}[\texttt{rlscf\_interp}]
The system of substitutable rules \texttt{rlscf}
satisfies display interpolation
\end{theorem}
\subsection{$\LADI$ property for binary multiplicative logical rules}
\label{s-mult}
%As discussed above, these were handled, indirectly, using the
%corresponding binary additive logical introduction rules, and weakening.
%However the possibility arises of a system which doesn't have the weakening
%rules and uses the multiplicative rules.
%So in this section we deal with these rules directly.
Here, we just consider the multiplicative version of these rules; the case of
their additive analogues is similar, but easier.
We deal with these rules in two stages ---
firstly, weakening in occurrences of $\#^n\emptyset$,
then changing any occurrence of $\emptyset$ to any structural atom,
as shown below.
\vspace{-3ex}
\begin{center}
\bpf
\A "$X \vdash A$"
\U "$X, Y_\emptyset \vdash A$" "$wk_\emptyset^*$"
\A "$Y \vdash B$"
\U "$X_\emptyset, Y \vdash B$" "$wk_\emptyset^*$"
\B "$X, Y \vdash A \land B$" "(\texttt{ands\_rep})"
\epf
\end{center}
Here $X_\emptyset$ and $Y_\emptyset$ mean the structures $X$ and $Y$,
with each structural atom (formula or uninterpreted structure variable)
replaced by $\emptyset$.
The first stage, the inferences labelled $wk_\emptyset^*$ above,
are obtained by repeatedly
weakening by occurrences of $\#^n\emptyset$ in some substructure.
The second stage (for which we define the relation \texttt{ands\_rep}),
consists of changing the $X_\emptyset$ of one premise, and the $Y_\emptyset$
of the other premise, to $X$ and $Y$ respectively.
For the second of these stages, then,
when any sequence of display postulates is applied to
$X, Y \vdash A \land B$, the same sequence can be applied to the two premises,
$X, Y_\emptyset \vdash A$ and $X_\emptyset, Y \vdash B$.
This simplifies the proof of local display interpolation for these rules.
For the first stage we proceed as described for \S\ref{s-ldi-wk},
using Proposition~\ref{ldi-wkI} to show that the inferences labelled
$wk_\emptyset^*$ satisfy the conditional LADI property.
The second stage consists of the rule shown as \texttt{ands\_rep}
in the diagram.
Considering the four points in Remark~\ref{remark},
since any display postulate applied to the conclusion can be applied to the
premises, we need to define a suitable relation between conclusion and premises
which is preserved by applying any display postulate to them.
%
For this we define a relation \texttt{lseqrepm} between sequents,
analogous to \texttt{seqrep} (\S\ref{s-cong}, Definition~\ref{d-seqrep})
and \texttt{lseqrep} (\S\ref{s-lair2}, Definition~\ref{d-lseqrep}):
\begin{verbatim}
lseqrepm :: "(structr * structr list) set =>
bool => [structr, structr list] => (sequent * sequent list) set"
\end{verbatim}
\begin{definition}[\texttt{lseqrepm, repnI\_atoms}]
\begin{enumerate}
\item
$(U, \Us) \in \texttt{lseqrepm}\ orel\ b\ Y\ \Ys$ means that
%(as for \texttt{lseqrep})
there is one occurrence of $Y$ in $U$ which
is replaced by the $n$th member of $\Ys$ in the $n$th member of $\Us$;
this occurrence is at an antecedent or succedent position, according to whether
$b$ is \texttt{True} or \texttt{False}.
However elsewhere in $U$, each structural atom $A$ in $U$ is replaced by
the $n$th member of $\As$ in the $n$th member of $\Us$, where
$(A, \As) \in orel$;
\item
$(A, \As) \in \texttt{repnI\_atoms}$ iff one of the $\As$ is $A$ and the rest of
the $\As$ are $\emptyset$.
\end{enumerate}
\end{definition}
We use \texttt{lseqrepm} only with $orel = \texttt{repnI\_atoms}$.
%
For example, for the ($\land$R) rule, we use
$\texttt{lseqrepm\ repnI\_atoms\ True}\ (A \land B)\ [A, B]$
as the relation $rel$ of the four points in Remark~\ref{remark}.
%
We get the following lemmas.
\begin{lemma}[\texttt{repm\_some1sub}]
Whenever $Y$ is a formula, and \\
$(U, \Us) \in \texttt{lseqrepm}\ orel\ b\ Y\ \Ys$,
and $U$ is manipulated by a display
postulate (or sequence of them) to give $V$,
then the $\Us$ can be manipulated by the same display postulate(s)
to give $\Vs$ (respectively), where $(V, \Vs) \in rel$.
\end{lemma}
The following lemmas refer to derivation in the system
containing the \texttt{ands\_rep} rule (not the regular ($\land$R) rule),
and also unit-weakening and unit-contraction.
\begin{lemma}[\texttt{ands\_mix\_gen}]
Whenever $(V, \Vs) \in rel$, then $V$ can be derived from the $\Vs$.
\end{lemma}
%As with Lemma~\ref{lseqrep-interpA},
This lemma relies on taking
the conjunction or disjunction of interpolants of premises.
So the next two results require a deductive system containing
the \texttt{ands\_rep} and \texttt{ora\_rep} rules, and also the
($\lor$R) and ($\land$L) rules.
\begin{lemma}[\texttt{lseqrepm\_interp\_andT}]
Whenever $(V, \Vs) \in rel$, then we can construct an interpolant for $V$
from interpolants for the $\Vs$.
\end{lemma}
\begin{proposition}[\texttt{ldin\_ands\_rep}]
The rule \texttt{ands\_rep} satisfies LADI.
\end{proposition}
We recall from \S\ref{s-defs} the set \texttt{rlscf\_nw} of substitutable rules,
the rules of Figure~\ref{fig:rules},
except the additive logical rules and weakening.
From this set we define a set of rules called \texttt{ldi\_add}
by omitting from \texttt{rlscf\_nw} the binary logical rules
$(\lor L)$ and $(\land R)$, but including the rule \texttt{ands\_rep}
(see diagram above) and a corresponding rule \texttt{ora\_rep}.
Note that these latter rules, and therefore \texttt{ldi\_add},
are not closed under substitution.
(Note that, as mentioned earlier, our formalisation had not included the
connective $\to$, or any rules for it).
% Definition 2.4 and Figures 1 and 3
% of \cite{brotherston-gore-interpolation}, except weakening.
\begin{lemma}[\texttt{ldi\_add\_equiv}]
% Let $C$ be the calculus consisting of the rules of Figure~\ref{fig:rules},
% % Definition~2.4 and Figures~1 and~3 of~\cite{brotherston-gore-interpolation}
% minus the additive logical rules and the weakening rule.
% Then the calculi
% WRONG
% $C \setminus \{(\land\mathrm{R}), (\lor\mathrm{L})\}$ and
% $C \cup \{\mathtt{ands\_rep}, \mathtt{ora\_rep}\}$
The calculi \texttt{ldi\_add} and \texttt{rlscf\_nw} (defined above)
are deductively equivalent.
\end{lemma}
\begin{theorem}[\texttt{ldi\_add\_interp, rlscf\_nw\_interp}]
\label{rlscf-nw-interp}
\begin{enumerate}
\item the system \texttt{ldi\_add} satisfies display interpolation
\item the system of substitutable rules
\texttt{rlscf\_nw} satisfies display interpolation
\end{enumerate}
\end{theorem}
\begin{proof}
We have all rules in \texttt{ldi\_add} satisfying at least the conditional
local display interpolation property (\texttt{ldi\_add\_cldin}).
By \texttt{cldin\_ex\_interp}, this gives us that the system \texttt{ldi\_add}
satisfies display interpolation \texttt{ldi\_add\_interp},
and so therefore does the equivalent system of substitutable rules
\texttt{rlscf\_nw}.
\end{proof}
\section{Conclusions}
\label{s-concl-fw}
As we have seen, interpolation proofs for display calculi
are very technical,
due to the inherent complexity of mixing the display principle with the
definition of interpolation.
As a consequence of this,
the proof of Brotherston and Gor\'e~\cite{brotherston-gore-interpolation}
is very technical, and most of the proofs were only sketched,
leaving the potential for errors.
Consequently it is valuable to have confirmed the correctness of their result
using a mechanised theorem prover.
And while the detailed proofs have only been sketched in this paper too,
the files containing the Isabelle proofs enable the proofs to be examined
to any desired level of detail.
This work has illustrated some interesting issues in the use of a mechanised
prover.
We have indicated where we found it necessary to follow a (slightly) different
line of proof.
This arose where their proof involved looking at corresponding parts of two
display-equivalent sequents --- an intuitively clear notion, but one which
seemed so difficult to formalise that a different approach seemed easier.
%
The two-stage approach used in \S\ref{s-mult} is also somewhat different from
the proof in \cite{brotherston-gore-interpolation}.
This work illustrated the enormous value of having a prover with a programmable
user interface. Isabelle is written in Standard ML, and (for its older
versions) the user interacts with it using that language.
This proved invaluable in the work described in \S\ref{s-ldi-uc-ctr} and
\S\ref{s-del}, where we were able to code up sequences of
attempted proof steps which handled enormous numbers of cases efficiently.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "root"
%%% End:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Here begins the end matter
% \bibliographystyle{plain}
% \bibliography{root}
\begin{thebibliography}{10}
\bibitem{mechanisation}
Isabelle/HOL mechanisation of our interpolation proofs.
\newblock Available at \url{http://users.cecs.anu.edu.au/~jeremy/isabelle/2005/interp/}.
\bibitem{belnap-display}
N~D {Belnap}.
\newblock Display logic.
\newblock {\em J.\ of Philosophical Logic}, 11:375--417, 1982.
\bibitem{brotherston-gore-interpolation}
James Brotherston and Rajeev Gor\'e.
\newblock Craig interpolation in displayable logics.
\newblock In {\em Proceedings of {TABLEAUX-20}}, LNAI, pages 88--103. Springer,
2011.
\bibitem{Buss:98-I}
Buss, S.R.: Handbook of Proof Theory, chap. I: Introduction to Proof Theory.
Elsevier Science (1998)
\bibitem{Caniart:10}
Nicolas Caniart.
\newblock {\sc Merit}: an interpolating model-checker.
\newblock In {\em Proceedings of CAV-22}, volume 6174 of {\em LNCS}, pages
162--166. Springer, 2010.
\bibitem{Craig:57-2}
William Craig.
\newblock Three uses of the {H}erbrand-{G}entzen theorem in relating model
theory and proof theory.
\newblock {\em Journal of Symbolic Logic}, 22(3):269--285, 1957.
\bibitem{dawson-gore-cut-admissibility}
J~E Dawson and R Gor{\'e}.
\newblock Formalised Cut Admissibility for Display Logic.
\newblock In Proc.\ TPHOLS'02, LNCS 2410, 131--147, Springer, 2002.
\bibitem{dawson-gore-gls}
J~E Dawson and R Gor{\'e}.
\newblock Generic Methods for Formalising Sequent Calculi
Applied to Provability Logic.
\newblock In Proc.\ Logic for Programming, Artificial Intelligence and
Reasoning (LPAR 2010), LNCS 6397, 263--277.
\bibitem{Feferman:08}
Solomon Feferman.
\newblock Harmonious logic: {C}raig's interpolation theorem and its
descendants.
\newblock {\em Synthese}, 164:341--357, 2008.
\bibitem{gore-india}
R~Gor\'e. Machine Checking Proof Theory: An Application of Logic to Logic.
Invited talk, Third Indian Conference on Logic and
Applications, Chennai, January 2009.
\bibitem{McMillan:08}
Kenneth~L. McMillan.
\newblock Quantified invariant generation using an interpolating saturation
prover.
\newblock In {\em
TACAS}-14, volume 4963 of {\em LNCS}, pages
413--427, 2008.
\end{thebibliography}
\newpage
\appendix
\section{Isabelle text of selected definitions and theorems}
\subsection{Isabelle text of definitions and basic lemmas}
\begin{verbatim}
ldi_derl :
"[| ALL psc:?pscs. ldi ?lrules ?drules psc; (?ps, ?c) : derl ?pscs |] ==>
ldi ?lrules ?drules (?ps, ?c)"
cldi_ex_interp :
"[| (ALL psc : ?pscs. cldi ?lrules ?drules psc);
?c : derivableR ?drules {} |] ==> edi ?lrules ?drules ?c"
\end{verbatim}
\subsection{Isabelle text of lemmas for \S\ref{s-unary}, unary logical rules}
Belnap's C4 property is used in the proof of cut-elimination, it being one of
the properties that structural rules must satisfy for Belnap's cut-elimination
theorem to apply to a Display Calculus.
Here, \texttt{seqSVs'} $b ~ seq$ is a list of the structural variables
in succedent (if $b$ is \texttt{True}) or antecedent (if $b$ is \texttt{False})
position in a sequent $seq$.
\begin{verbatim}
C4_def : "C4 ?rule == ALL prem:set (premsRule ?rule).
ALL b. ALL s:set (seqSVs' b (conclRule ?rule)).
s ~: set (seqSVs' (~ b) prem)"
\end{verbatim}
\begin{verbatim}
SF_some_sub :
"[| ALL (ps, c):PC ` ?rules. ~ seqCtnsFml c & distinct (seqSVs c);
ALL r:?rules. C4 r; (?prems, ?concl) : derl (PC ` rulefs ?rules);
(?concl, ?sconcl) : seqrep ?sa (Structform ?fml) ?Z |]
==> EX sprems.
(?prems, sprems) : seqreps ?sa (Structform ?fml) ?Z &
(sprems, ?sconcl) : derl (PC ` rulefs ?rules)"
\end{verbatim}
Lemma~\ref{l-dp-ldi} is actually proved more generally.
We define an \emph{invertible set} of rules to be a set of unary rules such
that the inverse of any of them is derivable from the substitution instances of
them.
The set \texttt{aidps} of rules satisfies this property
(theorem \texttt{inv\_rules\_aidps}).
\begin{verbatim}
inv_rules_def : "inv_rules ?rules == ALL r:?rules. EX p.
premsRule r = [p] & ([conclRule r], p) : derl (PC ` rulefs ?rules)"
inv_rules_aidps : "inv_rules aidps"
\end{verbatim}
Then Lemma~\ref{l-dp-ldi} actually holds for any invertible set of rules.
\begin{verbatim}
bi_lrule_ldi_lem : "[| ?r : rulefs ?lrules; inv_rules ?lrules |] ==>
ldi ?lrules ?drules (PC ?r)"
\end{verbatim}
\begin{verbatim}
non_bin_lem_gen "[| aidps <= ?drules; {nota, nots} <= ?drules;
(?ps, ?concl) : PC ` rulefs aidps;
ALL p:set ?ps. ~ seqHasComma p;
ALL p:set ?ps. Ex (interp ?drules p) |] ==>
Ex (interp ?drules ?concl)"
\end{verbatim}
\begin{verbatim}
tS_ldi : "ldi aidps rlscf ([], $I |- T)"
fA_ldi : "ldi aidps rlscf ([], F |- $I)"
idf_ldi : "ldi aidps rlscf ([], ?A |- ?A)"
\end{verbatim}
\begin{verbatim}
sdA1 : "[| ALL U. ([$?Y' |- $U], $?Y |- $U) : ?logI; strIsLog ?W;
(True, ?W, ?W') : strrep ?Y ?Y' |] ==>
([$?W' |- $?Z], $?W |- $?Z) : derl (?logI Un PC ` rulefs aidps)"
\end{verbatim}
\begin{verbatim}
seqrep_interpA : "[| ALL U. ([$?Y' |- $U], $?Y |- $U) : ?logI;
seqIsLog ($?W |- $?Z); strFVPPs ?Y' <= strFVPPs ?Y;
($?W |- $?Z, $?W' |- $?Z') : seqrep False ?Y ?Y';
?logI <= PC ` rulefs ?rules; aidps <= ?rules;
interp ?rules ($?W' |- $?Z') ?intp |] ==>
interp ?rules ($?W |- $?Z) ?intp"
\end{verbatim}
\begin{verbatim}
logA_ldi : "[| ALL (ps, c):PC ` aidps. ~ seqCtnsFml c & distinct (seqSVs c);
Ball aidps C4; strFVPPs ?Y <= fmlFVPPs ?fml; seqIsLog (?fml |- $?U);
ALL U. ([$?Y |- $U], ?fml |- $U) : ?logI;
?logI <= PC ` rulefs ?rules; aidps <= ?rules |] ==>
ldi aidps ?rules ([$?Y |- $?U], ?fml |- $?U)"
\end{verbatim}
Results analogous to the above for a logical introduction rule
for a formula on the right
are \texttt{seqrep\_interpS} and \texttt{logS\_ldi}.
\subsection{Binary Additive Logical Introduction Rules} \label{s-lair2}
We now discuss extending the results for unary logical introduction rules
to the binary rules in the additive form; that is, where the rule contains a
single structure variable which appears uniformly in the premises and
conclusion.
This involved, first, defining an analogue of \texttt{seqrep},
which we called \texttt{lseqrep}. As with \texttt{seqrep},
we use the notation
\textit{U} $^Y\!\!\leadsto^{Ys}$ \textit{Us}
\begin{verbatim}
lseqrep :
"bool => structr => structr list => (sequent * sequent list) set"
\end{verbatim}
\begin{definition}[\texttt{lseqrep}] \label{d-lseqrep} $(U, Us) \in
\texttt{lseqrep}\ b\ Y\ Ys$ means that there is some occurrence of
$Y$ in $U$ such that the $n$th member of $Us$ is obtained from $U$
by changing the occurrence of $\;Y$ to the $n$th member of $Ys$.
\end{definition}
\noindent
Note that, unlike \texttt{seqrep},
this definition involves just one occurrence of $Y$ in $U$.
% This simplification,
% that the replacement applies to exactly one occurrence of $Y$ in $U$,
% simplified the proofs considerably compared with the previous proof of the
% theorem \texttt{extSubs}, however the complication, that we have lists
% $Ys$ and $Us$, rather than single structures, complicated the proofs
% considerably.
% Thus we have a result \texttt{mextSubs} which is analogous to
% \texttt{extSubs}, a complicated result which gives, constructively,
% the \texttt{spremss} of the following theorem.
% \marginpar{How to reword to clarify?}
For this property we proved \texttt{SF\_some1sub}, analogous to
\texttt{SF\_some\_sub}, but where the rules must satisfy a slightly
stricter set of requirements:
\begin{lemma}[\texttt{SF\_some1sub}]
For a formula $F$, a list $Zs$ of structures and a rule set
\texttt{rules}, if
\begin{enumerate}
\item the conclusions of \texttt{rules} do not contain formulae; and
\item each premise of a rule in \texttt{rules} contains the same structure
variables, in antecedent positions and in succedent positions, as
the conclusion; and
\item the structure variables of the conclusion and of each premise
of a rule in \texttt{rules} are distinct; and
\item if \texttt{concl} is derivable from \texttt{prems} using
\texttt{rules}; and
\item if \texttt{concl} $^F\!\!\leadsto^{Zs}$ \texttt{sconcls} holds
\end{enumerate}
then there exists a list \texttt{spremss} of lists of sequents
where
\renewcommand\theenumi{\arabic{enumi}}
\renewcommand\labelenumi{(\theenumi)}
\begin{enumerate}
\item \texttt{prem$_n$} $^F\!\!\leadsto^{Zs}$ \texttt{sprems$_n$}
holds for each \texttt{prem$_n$} in \texttt{prems} and each
corresponding member \texttt{sprems$_n$} of \texttt{spremss}; and
\item each member of \texttt{sconcls} is derivable from the
corresponding member of each list in \texttt{spremss} using \texttt{rules}.
\end{enumerate}
\end{lemma}
% In this case the rules must satisfy a slightly stricter set of requirements:
% \begin{itemize}
% \item their conclusions do not contain formulae
% \item each premise contains the same structure variables, in antecedent
% positions and in succedent positions, as the conclusion
% \item the structure variables of the conclusion and of each premise
% are distinct
% \end{itemize}
% \begin{lemma}[\texttt{SF\_some1sub}]
% Where, for a formula $F$ and list $Z$ of structures,
% \begin{itemize}
% \item ``derivable'' means using a set of rules satisfying the conditions above
% \item if \texttt{concl} is derivable from \texttt{prems}
% \item if \texttt{concl} $^F\!\!\leadsto^{Zs}$ \texttt{sconcls}
% \end{itemize}
% then there exists \texttt{spremss}
% (this is a list of lists of sequents) where
% \begin{itemize}
% \item for each \texttt{prem$_n$} in \texttt{prems},
% let \texttt{sprems$_n$} be the corresponding member of \texttt{spremss},
% then
% \texttt{prem$_n$} $^F\!\!\leadsto^{Zs}$ \texttt{sprems$_n$}
% \item
% each member of \texttt{sconcls} is derivable from the corresponding member of
% each list in \texttt{spremss}
% \end{itemize}
% \end{lemma}
Then, corresponding to Lemma~\ref{l-sdA1} (\texttt{sdA1}) in \S\ref{s-unary},
we have a lemma \texttt{msdA1}, % (and its three more counterparts).
like Lemma~\ref{l-sdA1} except that,
in its statement, $Y'$ and $W'$ can be lists.
Then, corresponding to Lemma~\ref{seqrep-interpA} (\texttt{seqrep\_interpA}),
we have the following lemma: again, the difference is that
in the statement of Lemma~\ref{seqrep-interpA},
we replace $Y'$ by a list of structures and $W' \vdash Z'$ by a list of
sequents.
\begin{lemma}[\texttt{lseqrep\_interpA}] \label{lseqrep-interpA}
If a logical introduction rule has conclusion ${Y \vdash U}$
and premises ${Y_i \vdash U}$ for $Y_i \in Ys$,
formula variables in each $Y_i$ also appear in $Y$,
$W \vdash Z \ ^Y\!\!\!\leadsto^{Ys} Ss$ (in antecedent positions),
and for each $S_i = W_i \vdash Z_i \in Ss$
there exists an interpolant,
then there exists an interpolant for $W \vdash Z$.
\end{lemma}
\begin{proof}
There are two major cases in the proof:
all the structures $W_i$ are the same, or all the structures $Z_i$ are the same.
This is because the relation {S} $^{Z}\!\!\leadsto^{Zs}$ {Ss}
means that there is exactly one location in $S$ where the $Ss$ differ from $S$.
In those cases the proof uses the conjunction or disjunction, respectively,
of a list of interpolants.
Of course this idea is taken from the proof of
\cite[Theorem 3.10]{brotherston-gore-interpolation}. % (Binary Rules).
\end{proof}
Thence we get the result \texttt{mlogA\_ldi}, analogous to
\texttt{logA\_ldi}, which basically says that additive logical rules satisfy the
local display interpolation property.
% Analogous results for a logical introduction rule for a formula on the right
% are \texttt{lseqrep\_interpS} and \texttt{mlogS\_ldi}.
\begin{proposition}[\texttt{mlogA\_ldi}]
For a formula $F$, if a logical introduction rule $\rho$ has
conclusion ${F \vdash U}$ and premises ${Y_i \vdash U}$ for
$Y_i \in Ys$, and formula variables in each $Y_i$ also appear in
$F$, then $\rho$ satisfies the LADI property.
\end{proposition}
\subsection{Isabelle text of lemmas for \S\ref{s-lair2},
additive binary logical rules}
\begin{verbatim}
SF_some1sub : "[| ALL (ps, c):PC ` ?rules.
~ seqCtnsFml c & distinct (seqSVs c) & seqIsLog c &
Ball (set ps) seqIsLog &
(ALL p:set ps. distinct (seqSVs p) &
(ALL b. set (seqSVs' b p) = set (seqSVs' b c)));
Ball ?rules C4; (?prems, ?concl) : derl (PC ` rulefs ?rules);
(?concl, ?sconcls) : lseqrep ?sa (Structform ?fml) ?Zs |] ==>
EX spremss. (?prems, spremss) : lseqreps ?sa (Structform ?fml) ?Zs &
(ALL n
Ex (interp ?rules ($?W |- $?Z))"
\end{verbatim}
\begin{verbatim}
mlogA_ldi : "[| ALL (ps, c):PC ` aidps. ~ seqCtnsFml c &
distinct (seqSVs c) & seqIsLog c & Ball (set ps) seqIsLog &
(ALL p:set ps. distinct (seqSVs p) &
(ALL b. set (seqSVs' b p) = set (seqSVs' b c)));
Ball aidps C4; seqIsLog (?fml |- $?U);
ALL U. (map (%Y'. $Y' |- $U) ?Ys, ?fml |- $U) : ?logI;
ALL Y:set ?Ys. strFVPPs Y <= fmlFVPPs ?fml;
?logI <= PC ` rulefs ?rules; aidps <= ?rules; rlscf <= ?rules |] ==>
ldi aidps ?rules (map (%Y'. $Y' |- $?U) ?Ys, ?fml |- $?U)"
\end{verbatim}
\subsection{Isabelle text of lemmas for \S\ref{s-ldi-uc-ctr},
Unit-Contraction and Contraction}
The set \texttt{stars} $S$ is the set of all structures which consist of the
structure $S$ preceded by any number of occurrences of \texttt{Star}
(ie, of \# symbols).
The relation \texttt{seqdel (stars I)} will be the relation used for
unit-contraction, where
$(p, c) \in \texttt{seqdel}\ Fs$ if $p$ is obtained from $c$ by deleting
(in any number of places) a structure in $Fs$.
\begin{verbatim}
ex_box_uc : "[| ?atom = I; (?C, ?Cd) : seqdel (stars ?atom);
?Cd : derivableR aidps {?Cd'} |] ==>
EX C'. (C', ?Cd') : seqdel (stars ?atom) &
C : derivableR aidps {?C'}"
\end{verbatim}
The rules for replacing $(\emptyset, X)$ by $X$ on the left and the right
of the $\vdash$ are \texttt{ila} and \texttt{ils},
and the left contraction rule is \texttt{cA}.
\begin{verbatim}
delI_der : "[| (?Y, ?Y') : strdel (stars I); aidps <= ?rules;
{ila, ils} <= ?rules |] ==>
{([$?X |- $?Y], $?X |- $?Y'), ([$?Y |- $?X], $?Y' |- $?X)} <=
derl (PC ` rulefs ?rules)"
ldi_ila : "[| aidps <= ?rules; {ila, ils} <= ?rules |] ==>
ldi aidps ?rules (PC ila)"
\end{verbatim}
\begin{verbatim}
ctr_der : "[| (?Y, ?Y') : mstrctr; aidps <= ?rules; {cA} <= ?rules |] ==>
{([$?X |- $?Y], $?X |- $?Y'), ([$?Y |- $?X], $?Y' |- $?X)} <=
derl (PC ` rulefs ?rules)" :
ldi_cA : "[| aidps <= ?rules; {cA} <= ?rules |] ==> ldi aidps ?rules (PC cA)"
\end{verbatim}
Note that the theorem \texttt{ctr\_der} needs to be applied twice
(once for each side of the $\vdash$) to give the result in the main text.
\subsection{Isabelle text of lemmas for \S\ref{s-del}, the Deletion Lemma}
The proof threw up a number of (logically)
trivial cases which nonetheless needed particular results to be included
as lemmas to be used automatically in simplification, such as:
\begin{verbatim}
stars_Sf_not_Comma : "Comma ?X ?Y ~: stars (Structform ?fml)"
Stars_Sf_ne_Comma : "Comma ?X ?Y ~= funpow Star ?n (Structform ?fml)"
Stars_eq_Comma_iff : "(Comma ?X ?Y = funpow Star ?n (Comma ?U ?V)) =
(?n = 0 & ?X = ?U & ?Y = ?V)"
\end{verbatim}
\begin{verbatim}
deletion :
"[| ?atom = Structform ?fml; (?C, ?Cd) : seqdel ?pn (stars ?atom);
?C' : derivableR aidps {?C} |]
==> (EX Cd'.
(?C', Cd') : seqdel ?pn (stars ?atom) &
Cd' : derivableR aidps {?Cd}) |
(EX n m Z1 Z2.
?C' = ($(funpow Star n ?atom) |- $(funpow Star m (Comma Z1 Z2))) &
(if odd m then $Z1 |- * $Z2 else * $Z1 |- $Z2)
: derivableR aidps {?Cd} |
?C' = ($(funpow Star m (Comma Z1 Z2)) |- $(funpow Star n ?atom)) &
(if even m then $Z1 |- * $Z2 else * $Z1 |- $Z2)
: derivableR aidps {?Cd})" : Thm.thm
\end{verbatim}
\subsection{Isabelle text of lemmas for \S\ref{s-ldi-wk},
Unit-Weakening and Weakening}
\begin{verbatim}
ex_box_repI_atoms :
"[| (?C, ?Cd) : seqrepI str_atoms; ?C : derivableR aidps {?C'} |] ==>
EX Cd'. (?C', Cd') : seqrepI str_atoms & ?Cd : derivableR aidps {Cd'}"
\end{verbatim}
\begin{verbatim}
seqrepI_der : "[| (?S', ?S) : seqrepI ?Fs; aidps <= ?rules;
{ila, ils, mra} <= ?rules |] ==>
([?S], ?S') : derl (PC ` rulefs ?rules)"
ldi_repI_atoms : "[| aidps <= ?rules; {ila, ils, mra} <= ?rules;
(?c, ?p) : seqrepI str_atoms |] ==>
ldi aidps ?rules ([?p], ?c)"
\end{verbatim}
\begin{verbatim}
seqwk_der : "[| (?S', ?S) : seqdel ?Fs;
aidps <= ?rules; {mra} <= ?rules |] ==>
([?S], ?S') : derl (PC ` rulefs ?rules)"
\end{verbatim}
\begin{verbatim}
ldi_wkI : "[| aidps <= ?rules; {mra, ila, ils, tS, fA} <= ?rules;
(?c, ?p) : seqdel (stars I) |] ==>
cldi aidps ?rules ([?p], ?c)"
\end{verbatim}
\subsection{Isabelle text of lemmas for \S\ref{s-mult},
Binary Multiplicative Logical Introduction rules}
\begin{verbatim}
wkI_der : "[| (?Y', ?Y) : strdel (stars I); aidps <= ?rules;
{iila, iils} <= ?rules |] ==>
{([$?X |- $?Y], $?X |- $?Y'), ([$?Y |- $?X], $?Y' |- $?X)}
<= derl (PC ` rulefs ?rules)"
\end{verbatim}
\begin{verbatim}
ldi_wkI_alt : "[| aidps <= ?drules;
{iila, iils, tS, fA, ila, ils, tA, fS} <= ?drules;
(?c, ?p) : seqdel (stars I) |] ==>
cldi aidps ?drules ([?p], ?c)"
\end{verbatim}
The following result applies to display postulates satisfying
a standard set of display-postulate properties.
\begin{verbatim}
dp_props_def : "dp_props (?ps, ?c) =
(length ?ps = 1 & ~ seqCtnsFml ?c & distinct (seqSVs ?c) & seqIsLog ?c &
(ALL p:set ?ps. seqIsLog p & distinct (seqSVs p) & ~ seqCtnsFml p &
(ALL b. set (seqSVs' b p) = set (seqSVs' b ?c))))"
repm_some1sub :
"[| ALL rule:?rules. dp_props (PC rule); ?As ~= [];
([?prem], ?concl) : derl (PC ` rulefs ?rules);
(?concl, ?sconcls) : lseqrepm ?rsa ?sa (Structform ?fml) ?As |]
==> EX sprems.
(?prem, sprems) : lseqrepm ?rsa ?sa (Structform ?fml) ?As &
(ALL k EX subys. map (%suby. seqSubst suby ?pat) subys = ?Ys"
\end{verbatim}
\begin{verbatim}
ands_mix_gen :
"[| PC ` rulefs aidps <= ?rules; ands_rep <= ?rules;
PC ` rulefs ilrules <= ?rules;
(?Z, [?X, ?Y]) : lseqrepm repnI_atoms True (Structform (?A && ?B))
[Structform ?A, Structform ?B] |] ==> ?Z : derrec ?rules {?X, ?Y}"
\end{verbatim}
\begin{verbatim}
lseqrepm_interp_andT :
"[| ands_rep <= ?rules; ora_rep <= ?rules;
PC ` rulefs {anda} <= ?rules; PC ` rulefs {ors} <= ?rules;
PC ` rulefs aidps <= ?rules; PC ` rulefs ilrules <= ?rules;
(?WZ, [?pa, ?pb]) : lseqrepm repnI_atoms True
(Structform (?A && ?B)) [Structform ?A, Structform ?B];
ALL S:set [?pa, ?pb]. Ex (interpn ?rules S) |]
==> Ex (interpn ?rules ?WZ)"
\end{verbatim}
\begin{verbatim}
ldin_ands_rep : "[| (?ps, ?WZ) : ands_rep; PC ` rulefs ilrules <= ?drules;
PC ` rulefs aidps <= ?drules; PC ` rulefs {ors} <= ?drules;
PC ` rulefs {anda} <= ?drules; ora_rep <= ?drules;
ands_rep <= ?drules; ?lrules <= aidps |] ==>
ldin ?lrules ?drules (?ps, ?WZ)"
\end{verbatim}
\begin{verbatim}
ldi_add_equiv : "(?c : derrec ldi_add {}) = (?c : derivableR rlscf_nw {})"
ldi_add_cldin : "?rule : ldi_add ==> cldin aidps ldi_add ?rule"
ldi_add_interp : "Ball (derrec ldi_add {}) (edin aidps ldi_add)"
rlscf_nw_interp : "Ball (derivableR rlscf_nw {}) (edi aidps rlscf_nw)"
\end{verbatim}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End: