\documentclass{amsart}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{mathabx}
\usepackage{stmaryrd}
\theoremstyle{plain}
\newtheorem{thm}{Theorem}[section]
\newtheorem{cor}[thm]{Corollary}
\newtheorem{lem}[thm]{Lemma}
\theoremstyle{definition}
\newtheorem{defn}[thm]{Definition}
\newtheorem{exer}{Exercise}
\newtheorem{exam}{Example}
\theoremstyle{remark}
\newtheorem{rem}[thm]{Remark}
\newtheorem{fact}[thm]{Fact}
\newtheorem{hyp}[thm]{Hypothesis}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\res}{\restriction}
\newcommand{\dom}{\operatorname{dom}}
\newcommand{\ran}{\operatorname{ran}}
\newcommand{\cof}{\operatorname{cof}}
\newcommand{\ot}{\text{o.t.}}
\newcommand{\on}{\text{ON}}
\newcommand{\card}{\text{CARD}}
\newcommand{\ac}{\text{AC}}
\newcommand{\zf}{\text{ZF}}
\newcommand{\zfc}{\text{ZFC}}
\newcommand{\sP}{\mathcal{P}}
\newcommand{\sL}{\mathcal{L}}
\newcommand{\ch}{\text{CH}}
\newcommand{\gch}{\text{GCH}}
\newcommand{\vb}{\text{var}}
\newcommand{\pa}{\text{PA}}
\newcommand{\fa}{\text{F}}
\newcommand{\con}{\text{CON}}
\newcommand{\bz}{\boldsymbol{0}}
\newcommand{\lD}{{\Delta}}
\newcommand{\lS}{{\Sigma}}
\newcommand{\lP}{{\Pi}}
%\newcommand{\models}{\vDash}
\newcommand{\proves}{\vdash}
\newcommand{\sg}{\text{sg}}
\newcommand{\seq}{\text{Seq}}
\newcommand{\lh}{\text{lh}}
\begin{document}
\begin{center}
\textbf{First Order Logic and the G\"{o}del Incompleteness Theorem}
\end{center}
\bigskip
\section{Review of First Order Logic}
We give a brief overview of the main concepts and results of first order
logic. This is far from a complete survey, but rather a quick presentation of the
central points. We first review the basic set-up of first-order logic, with an eye
towards two cases of particular interest: set theory (which we have been doing all along)
and number theory (or arithmetic) which will be of importance for the G\"{o}del
incompleteness theorem. In the case of set theory, we already briefly introduced
some of the relevant notions in \S~\ref{secaxioms} but here we do the general case.
\begin{defn}
By a \emph{language of first-order logic} we mean a collection
$\sL= \{ R_i, f_i, c_i\}_{i \in \omega}$ of relation symbols $R_i$, function
symbols $f_i$, and constant symbols $c_i$.
\end{defn}
Each relation symbol $R$ or function symbol $f$ in a first-order language
has an \emph{arity} associated with it, which is a positive integer.
The exact meaning of the arity is made clear below, but it is intended
to denote the arity of the relation or function to which the symbol
$R$ or $f$ corresponds.
\begin{exam}
The language of set theory consists of a single binary relation symbol
$\in$. The language of number theory consists of binary function symbols
$+, \cdot, E$ ($E$ is intended to denote the exponentiation function $(x,y)
\to x^y$), a unary function symbol $S$ (intended to denote the
successor function $n \to n+1$), a binary relation symbol $<$, and a constant
symbol $\bz$. Thus, $\sL= \{ <, +, \cdot , E, S, \bz \}$.
\end{exam}
Note that the definition of a language is purely ``syntactical,'' that is,
no meaning is in any way assigned to these symbols of the language (e.g.,
there is nothing that says $+$ somehow really represents what we think of as addition
on the natural numbers, whatever that means).
For any first-order language $\sL$, we consider also certain logical symbols
which are always allowed in constructing formulas. These are variable
symbols $x_0, x_1, x_2, \dots$, connectives $\wedge, \vee, \neg$,
quantifiers $\exists, \forall$, parentheses $(, )$, and a symbol $\approx$
for equality. We could choose to
use just $\wedge, \neg$, and $\forall$, defining $\vee$ and $\exists$ in terms
of these. We now define the formulas (or well-formed formulas, wffs) of
the language $\sL$.
\begin{defn}
For $\sL$ a first-order language, the \emph{terms} of the language
are defined recursively as follows:
\begin{enumerate}
\item
Variable symbols $x_i$ and constant symbols $c_i$ are terms.
\item
If $f \in \sL$ is an $n$-ary function symbol and $t_1,\dots,t_n$
are terms, then $f(t_1,\dots,t_n)$ is a term.
\end{enumerate}
\end{defn}
The terms of the language are the syntactical objects which are intended to
denote actual objects (how they actually do this will be made clear below).
\begin{defn}
The \emph{formulas} of the language $\sL$ are defined recursively as
follows.
\begin{enumerate}
\item \label{atomic}
If $R \in \sL$ is an $n$-ary relation symbol and $t_1,\dots, t_n$ are
terms, then $R(t_1,\dots,t_n)$ is a formula. Also,
if $t_1,t_2$ are terms, then $t_1 \approx t_2$ is a formula.
\item
If $\phi, \psi$ are formulas then so are $(\phi \wedge \psi)$, $(\phi \vee \psi)$,
and $(\neg \phi)$.
\item
If $\phi$ is a formula, then so are $(\exists x_i \phi)$ and
$(\forall x_i \phi)$.
\end{enumerate}
\end{defn}
The formulas in case (\ref{atomic}) are said to be the \emph{atomic}
formulas.
An occurrence of a variable $x$ in a formula $\phi$ is said to be \emph{free}
if it is not within the scope of a $\forall x$ or $\exists x$
quantifier. More precisely:
\begin{defn}
We define this by recursion on the formulas:
\begin{enumerate}
\item
Any occurrence of $x$ in an atomic formula is free.
\item
$x$ occurs free in $(\phi \wedge \psi)$ iff it occurs free in $\phi$ or
it occurs free in $\psi$. $x$ occurs free in $(\neg \phi)$
iff it occurs free in $\phi$.
\item
$x$ occurs free in $(\forall y \phi)$ iff it occurs free in $\phi$ and
$y \neq x$. If $y=x$, then $x$ does not occur free in $(\forall y \phi)$.
\end{enumerate}
\end{defn}
We say $x$ occurs free in $\phi$ if it has a free occurrence in $\phi$.
We say a \emph{sentence} is a formula with no free variables.
We usually write $\phi(x_1,\dots,x_k)$ to denote a formula $\phi$
whose free variables are among $x_1,\dots,x_k$.
Intuitively, a formula $\phi(x_1,\dots,x_k)$ is intended to be an assertion
about the objects represented by $x_1,\dots, x_k$. In particular,
a sentence is intended to be a statement whose truth can be ascertained
without any knowledge of what certain variables represent.
We have not yet
officially assigned any meaning to formulas, however; they are
currently purely syntactical.
\begin{exam}
If $\sL$ is the language of set theory, the only terms are the variables $x_i$.
The only atomic formulas are $x_i \approx x_j$ and $x_i \in x_j$. The axioms of
$\zf$ written down earlier were all sentences in the language of set theory.
All of the theorems proven earlier about ordinals, cardinals, etc. are
expressible as sentences in the language of set theory.
Suppose now $\sL$ is the language of number theory. Then
$\phi=( (\neg x \approx 0) \wedge (\neg x \approx S(0)) \wedge
\forall m\ \forall n\ (x \approx m \cdot n \rightarrow
(m \approx S(0) \vee n \approx S(0)) ))$ is a formula with free variable $x$
which attempts to assert that $x$ is prime. The sentence
$\psi= \forall m\ \exists n\ \exists k\ (n >m \wedge \phi(n) \wedge \phi(k)
\wedge k\approx S(S(n)))$ asserts the twin-prime conjecture.
\end{exam}
In order to assign meaning to a formula, we must have a ``universe''
of objects in which to interpret the quantifiers, interpretations of the
relation, function, and constant symbols of the language, and if the formula
has free variables we also have to know how to interpret the free variables.
This is made precise in the following definition of structure.
\begin{defn}
A \emph{structure} for a first-order language
$\sL= \{ R_i, f_i, c_i\}_{i \in \omega}$ is an object of the form
$\frak{A}=(A; R_i^A, f_i^A,c_i^A)$ where $A$ is a non-empty set,
$R_i^A$ is an $n$-ary relation on $A$ (where $n$ is the arity of the
relation symbol $R_i)$, $f_i^A\colon A^n \to A$ is an $n$-ary function
(again, $n$ is the arity of $f_i$), and $c_i^A \in A$.
\end{defn}
Thus, a structure provides a universe $A$ as well as an interpretation
of the symbols of the language $\sL$. Structures may be thought of as
fairly general mathematical objects. For example, consider the language
of group theory, which consists of a single binary function symbol $\cdot$.
A structure for the language is a set $A$ with a binary operation
$(a,b) \to a \cdot b \in A$ on $A$. A group, for example, would
be such a structure (but of course not all structures would be groups).
As another example, for $\sL$ the language of number theory, the
``usual'' natural numbers would be a structure
$\N= ( \N; +^\N, \cdot^\N, E^\N, <^\N, S^\N, 0^\N )$ for the language,
where $+^\N$ denotes the usual addition on $\N$, etc. Of course, there
are many other structures for this language. [note: what we mean by the ``usual''
natural numbers is vague and undefined here. We can think of this as meaning
the finite ordinals of a model of $\zfc$ set theory which we suppress
mentioning and identify with the metatheory].
A structure will be sufficient to decide the truth or falsity of a sentence,
but for a formula $\phi(x_1,\dots,x_n)$ with free variables, we need
also a map $s \colon \vb \to A$ interpreting the variables (or at least the
free variables in $\phi$). We wish to define precisely the meaning of
`` the structure $\mathfrak{A}$ satisfies $\phi$ at the variable assignment $s$,''
which we will denote as $\mathfrak{A} \models \phi [s]$. The formal
definition follows.
\begin{defn}
Let $\mathfrak{A}$ be a structure for the first-order language $\sL$,
$\phi$ a formula, and $s \colon \vb \to |\mathfrak{A}|$. We define
$\mathfrak{A} \models \phi [s]$ as follows.
First we extend the interpretation function $s$ to terms $t$.
\begin{enumerate}
\item
If $x \in \vb$, then $s(x)$ is already defined. If
$c$ is a constant symbol, then $s(c)=c^\mathfrak{A}$.
\item
If $u=f(t_1,\dots,t_n)$ is a term, then $s(u)=f^\mathfrak{A}(s(t_1),\dots,
s(t_n))$.
\end{enumerate}
Next we define the notion $\mathfrak{A} \models \phi [s]$ by recursion on
$\phi$ as follows.
\begin{enumerate}
\item
If $\phi$ is the atomic formula $R(t_1,\dots,t_n)$, then
$\mathfrak{A} \models \phi [s]$ iff $R^\mathfrak{A}(s(t_1),\dots,s(t_n))$.
If $\phi$ is the atomic formula $t_1 \approx t_2$, then
$\mathfrak{A} \models \phi [s]$ iff $s(t_1)= s(t_2)$.
\item
$\mathfrak{A} \models (\phi \wedge \psi) [s]$
iff $\mathfrak{A} \models \phi [s]$ and $\mathfrak{A} \models \psi [s]$.
$\mathfrak{A} \models (\neg \phi) [s]$ iff it is not the case that
$\mathfrak{A} \models \phi [s]$.
\item
$\mathfrak{A} \models \forall x \phi [s]$ iff for every $a \in A$ we have that
$\mathfrak{A} \models \phi [s(x|a)]$, where
$s(x|a)(y)=s(y)$ if $y \neq x$ and $s(x|a)(x)=a$.
\end{enumerate}
\end{defn}
The above definition contains no surprises; it simply formalizes the usual
notion of satisfaction of a statement in a mathematical structure.
If $\Gamma$ is a collection of formulas, we write $\mathfrak{A}
\models \Gamma [s]$ to mean $\mathfrak{A} \models \phi [s]$ for all
$\phi \in \Gamma$.
The algebraic notion of homomorphism can be given in this general context.
\begin{defn}
Let $\mathfrak{A}$, $\mathfrak{B}$ be two structures for a first-order
language $\sL=\{ R_i,f_i,c_i\}$. A \emph{homomorphism} $\pi$ from
$\mathfrak{A}$ to $\mathfrak{B}$ is a map $\pi \colon A \to B$
satisfying: $R^\mathfrak{A}(a_1, \dots a_n)$ iff
$R^\mathfrak{B}(\pi(a_1), \dots \pi(a_n))$,
$\pi(f^\mathfrak{A}(a_1,\dots,a_n))= f^\mathfrak{B}(\pi(a_1),\dots,
\pi(a_n))$, and $\pi(c^\mathfrak{A})=c^\mathfrak{B}$ for all
$a_1,\dots, a_n \in A$ and all relation, function, and constant symbols
$R,f,c$ in $\sL$. An \emph{isomorphism} is a one-to-one, onto,
homomorphism.
\end{defn}
We now introduce the notion of logical implication.
\begin{defn}
Let $\Gamma$ be a collection of formulas in a first-order language $\sL$, and
let $\phi$ be a formula. We write $\Gamma \models \phi$ if for every
structure $\mathfrak{A}$ for $\sL$ and every $s \colon \vb \to |\mathfrak{A}|$,
if $\mathfrak{A} \models \Gamma [s]$, then $\mathfrak{A} \models \phi [s]$.
\end{defn}
Thus, $\Gamma \models \phi$ if every structure satisfying $\Gamma$ also
satisfies $\phi$. Frequently we think of $\Gamma$ as being the axioms
for some theory we are studying.
\begin{exam}
For $\sL$ the language of group theory (i.e., a single binary function
symbol, but for convenience we now add a constant symbol $e$
to the language as well), let $\Gamma$ be the following set of sentences:
$\forall x\ \forall y\ \forall z\ x \cdot (y \cdot z) \approx (x \cdot y) \cdot z$
$\forall x\ (x \cdot e \approx x )$
$\forall x \ \exists y (x \cdot y \approx e)$.
\noindent
Thus, $\Gamma$ is the usual set of axioms for groups. If $\phi$ is a sentence
in the language of group theory, then $\Gamma \models \phi$ iff
$\phi$ is true in all groups.
\end{exam}
We have introduced the notion of logical satisfaction, $\models$ above.
This is one of two of the basic notions of implication from first-order logic.
The other is the notion of provability, or deducability. We write
$\Gamma \vdash \phi$ for this notion. This notion can be defined in several,
ultimately equivalent, ways. The exact definition is not so important for
us here, only that this notion satisfies several reasonable properties.
However, for the sake of completeness we give one formal definition of
$\vdash$. We assume below that the only connectives are
$\neg$ and $\rightarrow$.
\begin{defn}
The \emph{logical axioms} (for a given first-order language $\sL$)
are the universal closures of the following
formulas (here $\alpha$, $\beta$, $\gamma$ are arbitrary formulas of $\sL$):
\begin{enumerate}
\item
$\alpha \rightarrow (\beta \rightarrow \alpha)$.
\item
$(\alpha \rightarrow (\beta \rightarrow \gamma)) \rightarrow ((\alpha \rightarrow
\beta) \rightarrow (\alpha \rightarrow \gamma))$
\item
$(\neg \alpha \rightarrow \neg \beta) \rightarrow ((\neg \alpha \rightarrow
\beta) \rightarrow \alpha)$
\item
$\forall x \ \alpha \rightarrow \alpha^x_t$ whenever $t$ is \emph{substitutable}
for $x$ in $\alpha$ (this means that where $x$ occurs free in $\alpha$,
no variable of $t$ is in the scope of a quantifier). Here $\alpha^x_t$
is the result of replacing $x$ where it occurs free by $t$.
\item
$\forall x\ (\alpha \rightarrow \beta) \rightarrow (\forall x\ \alpha \rightarrow
\forall x \ \beta)$
\item
$\alpha \rightarrow \forall x \ \alpha$, where $x$ is not free in $\alpha$.
\item
$x_i \approx x_i$
\item
$(x_i \approx x_j) \rightarrow (\alpha \rightarrow \alpha')$, where $\alpha$ is
atomic and $\alpha'$ is the result of replacing some of the occurrences of
$x_i$ by $x_j$.
\end{enumerate}
\end{defn}
\begin{defn}
$\Gamma \vdash \phi$ iff there is a sequence of formulas (a deduction)
$\phi_0,\phi_1,\dots,\phi_n=\phi$ such that for all $i$ either $\phi_i$
is a logical axiom or an element of $\Gamma$,
or the formulas $\psi$, $\psi \rightarrow \phi_i$
occur before $\phi_i$ for some $\psi$ (we say $\phi_i$ is deduced by
modus ponens).
\end{defn}
The central result in first-order logic is the G\"odel completeness
theorem:
\begin{thm}
For any first-order language $\sL$, set of formulas $\Gamma$, formula
$\phi$, we have $\Gamma \models \phi$ iff $\Gamma \vdash \phi$.
\end{thm}
As we said, the exact details of the definition of the provability relation
are not so important. All we really require is that we be able to prove the
completeness theorem and that the notion ``$\vec \phi$ is a deduction
of $\psi$ from $\Gamma$'' is recursive for recursive $\Gamma$ (we discuss
the notion of recursive below).
\section{Arithmetic} \label{secarith}
We give now a more or less standard axiomatization for ``the natural numbers.''
That is, we write down axioms which intend to capture the intuitive properties
we ascribe to the natural numbers. As we will see below, however, we must be careful
in using a phrase such as ``the natural numbers.'' The following axiom scheme
is referred to as the Peano axioms for the natural numbers. It consists of
a finite set of axioms (sometimes called the Frege subsystem) together with
an infinite schema of induction axioms. We present the axiom scheme in the language
$\sL= \{ +, \cdot, E, S, < ,0\}$, but note that the language consisting of
just $+$ and $\cdot$ would suffice (the other functions, relations, and $0$ can
be defined from $+$ and $\cdot$ within the version of the Peano
axiom scheme mentioning only the axioms for these two functions). It simplifies things
a little to have these extra symbols in the language, however.
\begin{defn}
The Peano axiom scheme is the following set of sentences in the language of
number theory.
\noindent
(Successor Axioms)
$\forall x\ \neg(S(x) \approx \bz)$.
$\forall x\ \forall y\ (S(x) \approx S(y) \rightarrow x \approx y)$
\noindent
(Order axioms)
$\forall x\ \neg (x < \bz)$
$\forall x\ \forall y\ (x < S(y) \leftrightarrow (x < y \vee x \approx y))$
\end{defn}
% NOTE(review): a span of the source is missing here. It contained the
% remaining Peano axioms (the rest of the order axioms, the axioms for
% $+$, $\cdot$, $E$, and the induction schema), the remainder of the
% section on arithmetic, and the beginning of the definition of the class
% of recursive functions. The bridging text below is a reconstruction and
% should be checked against the original.
\begin{defn}
The class of \emph{recursive} functions is the smallest class of (total)
functions $f \colon \omega^n \to \omega$ (for various $n$) satisfying the
following.
\begin{enumerate}
\item
The successor function, addition, multiplication, the constant functions,
and the projection functions $(a_1,\dots,a_n) \to a_i$ are recursive.
\item
The class is closed under composition.
\item
The class is closed under primitive recursion. That is, if $g(\vec x)$ and
$h(y, \vec x, n)$ are recursive, then so is the function $f$ defined by
$$f(\vec x, n)= \begin{cases} g(\vec x) & \text{ if } n=0 \\
h(f(\vec x, n-1), \vec x, n-1) & \text{ if } n>0 \end{cases}$$
\item
The class is closed under minimalization. That is, if $g(\vec x, n)$
is recursive and for all $\vec x$ there is an $n$ such that $g(\vec x, n)=0$,
then the function $f$ defined by $f(\vec x)= \mu n\ (g(\vec x, n)=0)$
is recursive. Here ``$\mu n$'' denotes ``the least $n$.''
\end{enumerate}
\end{defn}
There is a slight redundancy in the above list according to the next exercise.
\begin{exer}
Show that multiplication can be defined by primitive recursion
from addition, as can the successor function. Show also that
exponentiation is recursive.
\end{exer}
\begin{exer} \label{exerrec}
Show that the equality relation on $\omega$ is recursive using the
above official definition of recursive. Show that the function
$a \dotdiv b = \begin{cases} a-b & \text{ if } a \geq b \\
0 & \text{ otherwise} \end{cases}$ is recursive. Show that $\sg(n)=
\begin{cases} 1 & \text{ if } n > 0 \\ 0 & \text{ if } n=0 \end{cases}$
\ is recursive.
\end{exer}
The subclass of functions defined by all but the last (minimalization)
clause is called the class of \emph{primitive recursive } functions. A relation
is said to be primitive recursive if its characteristic function is.
\begin{defn}
We say a relation $R \subseteq \omega^n$ is recursive iff its characteristic
function $\chi_R \colon \omega^n \to \{ 0, 1 \}$ is recursive.
\end{defn}
The next lemma shows we can go back and forth between relations and functions.
\begin{lem}
Let $f \colon \omega^n \to \omega$ be a (total) function. Then $f$
is recursive iff its graph $G_f= \{ (\vec a,b) \colon f(\vec a)=b \}$ is.
\end{lem}
\begin{lem}
If $R$ is a recursive relation and $f$ is a recursive function, then
$R'(n) \leftrightarrow R(f(n))$ is also recursive. The same is true for primitive
recursive.
\end{lem}
\begin{proof}
$\chi_{R'}(n)= \chi_{R}(f(n))$ is a composition of two recursive (or
primitive recursive) functions.
\end{proof}
\begin{proof}[Proof of the lemma on graphs]
If $f$ is recursive, then $\chi_{G_f}(\vec a, b)= \chi_{=} (f(\vec a),b)$
is recursive using exercise~\ref{exerrec}. Conversely, if
$G_f$ is recursive, so $\chi_{G_f}$ is a recursive function, then
$f(\vec a)= \mu b\ (1 \dotdiv \chi_{G_f}(\vec a,b) =0)$ is recursive.
\end{proof}
\begin{lem} \label{lemrec1}
The class of recursive relations contains all relations defined by atomic formulas,
and is closed under finite unions, intersections, complements,
and bounded number quantification. The same is true for the primitive recursive
relations. In particular, the primitive recursive relations
contain all of the $\lD^0_0$ relations.
\end{lem}
\begin{proof}
If $t=t(x_1,\dots,x_n)$ is a term, then a straightforward induction
shows that the corresponding function $f_t(a_1,\dots,a_n)=
t^{\N}(a_1,\dots,a_n)$ is primitive recursive (more precisely we should
write $f_t(a_1,\dots,a_n)=$ the unique $m$ such that
$\N \models t(S^{a_1}(\bz),\dots, S^{a_n}(\bz))=S^m(\bz)$).
If $\phi$ is atomic of the form $\phi=(t \approx u)$, then
the relation defined by $\phi$, namely $R_\phi(n)
\leftrightarrow (f_t(n)=f_u(n))$, is primitive recursive. The same is true
if $\phi=(t < u)$ as the relation $<$ is easily primitive recursive.
If $R_1$, $R_2$ are primitive recursive, then so is $R=R_1 \cap R_2$
since $\chi_R(n)=\chi_{R_1}(n) \cdot \chi_{R_2}(n)$. The same is easily
also true for unions and complements.
Suppose now $R(n) \leftrightarrow \exists m \leq n\ S(n,m)$
where $S$ is recursive (or primitive recursive).
Define $R'(n,k) \leftrightarrow \exists m \leq k \ S(n,m)$.
Then $\chi_{R'}(n,0)= \chi_S(n,0)$, and for $k>0$
$\chi_{R'}(n,k)= \sg(\chi_{R'}(n,k-1)+ \chi_S(n,k))$, and so
$\chi_{R'}$ is recursive (or primitive recursive) by closure under
primitive recursion. Since $\chi_R(n)=\chi_{R'}(n,n)$ we also
have that $R$ is (primitive) recursive.
\end{proof}
The proof of the previous lemma actually shows a bit more.
\begin{lem}
If $S(n,m)$ is recursive and $f \colon \omega \to \omega$ is recursive, then
$R(n) \leftrightarrow \exists m \leq f(n) \ S(n,m)$ is recursive.
Likewise, if $S$ and $f$ are primitive recursive, then $R$ is
primitive recursive.
\end{lem}
\begin{proof}
Define $R'(n,k) \leftrightarrow \exists m \leq f(k)\ S(n,m)$.
Then $\chi_{R'}(n,k)=\chi_{R''}(n,f(k))$, where
$R''(n,k) \leftrightarrow \exists m \leq k\ S(n,m)$ is recursive
(or primitive recursive) from lemma~\ref{lemrec1}. Thus, $R'$
is (primitive) recursive and thus so is $R(n) \leftrightarrow
R'(n,n)$.
\end{proof}
The next lemma shows that definitions by cases preserve recursiveness or
primitive recursiveness.
\begin{lem}
If $R_1,\dots, R_k$ are recursive (or primitive recursive) relations, and
$f_1,\dots,f_k$ are recursive (or primitive recursive) functions, then
the function
$$
f(n)= \begin{cases} f_1(n) & \text{ if } R_1(n) \\
f_2(n) & \text{ if } R_2(n)\\ &{ \vdots} \\
f_k(n) & \text{ if } R_k(n)
\end{cases}
$$
is also recursive (primitive recursive).
\end{lem}
\begin{proof}
$f(n)= f_1(n) \cdot \chi_{R_1}(n) + \dots + f_k(n) \cdot \chi_{R_k}(n)$.
\end{proof}
We introduce some coding and decoding functions on the integers.
Let $p(0)=2, p(1)=3, p(2)=5,\dots,$ and in general $p(n)=$ the next prime after
$p(n-1)$ (we refer to $p(i)$ as the ``$i^{\text{th}}$ prime'').
\begin{defn} \label{defcoding}
For $(a_0,\dots,a_k) \in \omega^{< \omega}$ let
$\langle a_0,\dots,a_k \rangle = p(0)^{a_0+1} p(1)^{a_1+1} \cdots p(k)^{a_k+1}$.
Let $\seq= \{ \langle \vec a \rangle \colon \vec a \in \omega^{< \omega} \}$
be the set of all codes of finite sequences. For
$n = \langle a_0, \dots, a_k \rangle \in \seq$, let
$\lh(n)=k+1$ be the length of the sequence coded by $n$, and
for $n \notin \seq$, let $\lh(n)=0$. Define the binary
decoding function $(n,i) \to (n)_i$ by $(n)_i= a_i$ if
$n= \langle a_0,\dots,a_k \rangle$ codes a sequence of length $> i$, and
$(n)_i=0$ otherwise.
\end{defn}
Clearly the map $(\vec a ) \to \langle \vec a \rangle$ is one-to-one on
$\omega^{< \omega}$.
\begin{lem} \label{lemcoding}
The function $p$ and the set $\seq$ are primitive recursive.
For any fixed $ k \in \omega$, the function $(a_0,\dots,a_k) \to
\langle a_0,\dots,a_k \rangle$ is primitive recursive. The function
$\lh$ and the decoding function $(n,i) \to (n)_i$ are primitive recursive.
\end{lem}
\begin{proof}
The recursive definition $p(i)= \mu n \ [ n> p(i-1) \wedge n \text { is prime }]$
shows that the prime function is recursive. To see it is primitive recursive,
use the fact that, for example, there is a prime between any $n$ and $2n$,
so we may write $p(i)=\mu n \leq 2 p(i-1)\
[ n> p(i-1) \wedge n \text { is prime }]$. Note that
$n \in \seq \leftrightarrow \forall p \leq n\ \forall q \leq n\ [(p,q
\text{ are prime } \wedge p < q \wedge q | n) \rightarrow p |n]$.
Since the dividing relation is clearly primitive recursive, this shows
$\seq$ is also. For any fixed $ k$, the function $(a_0,\dots,a_k) \to
\langle a_0,\dots,a_k \rangle$ is clearly primitive recursive.
We have $\lh(n)= \mu l \leq n \ [(n \notin \seq \wedge l=0) \vee
(n \in \seq \wedge s(n,l))]$ where
\begin{equation*}
\begin{split}
s(n,l) \leftrightarrow &\, \exists m \leq n^n \ [ \forall p \leq n\
( p \text{ is prime } \rightarrow (p \mid n \leftrightarrow p \mid m)) \wedge 2 \mid m
\wedge 4 \nmid m \\
& \wedge \forall p \leq n\ \forall q \leq n\ \forall a \leq n\ [ (p,q \text{ are prime }
\wedge p < q \wedge \neg \exists r\ (p < r < q \wedge r \text{ is prime }))
\\ & \quad \quad \rightarrow (p^a \mid m \leftrightarrow q^{a+1} \mid m) ] \\
& \wedge \exists p \leq n \ (p \text{ is prime } \wedge p^{l} \mid m \wedge p^{l+1} \nmid m
\wedge \forall q \leq n\ ( (q \text{ is prime } \wedge q > p) \rightarrow
q \nmid m))]
\end{split}
\end{equation*}
% NOTE(review): the display above repairs unbalanced brackets in the source
% and replaces several occurrences of $n$ by $m$, since (per the explanation
% following) the clauses describe the form of the witness $m$. The bound
% $m \leq n^n$ replaces the source's $m \leq n$, as the witness
% $m= 2^1 3^2 \cdots p^{l}$ need not be below $n$ --- confirm against the
% original.
Thus, $s(n,l)$ asserts that there is an $m$ of the form
$m= 2^1 3^2 5^3 \cdots p^{l}$ and $p$ is the largest prime dividing
$n$.
Finally,
\begin{equation*}
\begin{split}
(n)_i= \mu k \leq n\ & [ (n \notin \seq \wedge k=0) \vee
(n \in \seq \wedge i \geq \lh(n) \wedge k=0) \\ & \vee
( n \in \seq \wedge i < \lh(n) \wedge
p(i)^{k+1} \mid n \wedge p(i)^{k+2} \nmid n)].
\end{split}
\end{equation*}
\end{proof}
\begin{exer}
Suppose $g$ and $h$ are primitive recursive and $f$ is defined from them
by the following \emph{total recursion}:
$f(\vec a, 0)=g(\vec a)$, $f(\vec a, n+1)=
h(\langle f(\vec a,0), \dots, f(\vec a ,n) \rangle, \vec a, n)$.
Show that $f$ is primitive recursive. [hint: show that the function
$f'(\vec a,n)=\langle f(\vec a,0),\dots, f(\vec a,n) \rangle$ is primitive
recursive.]
\end{exer}
We now sketch a proof of the result that the class of recursive functions coincides
with the class of machine computable (total) functions. Since we have not given
a precise definition of ``machine computable,'' this sketch will
necessarily be a bit vague, but the reader with a particular definition of
machine computability in mind will have no trouble making the following
argument precise. We will not use this equivalence in what follows.
\begin{thm}
The class of recursive functions coincides with the class of machine
computable (total) functions.
\end{thm}
\begin{proof}
It is straightforward to see that any recursive function is machine
computable, upon consideration of the various cases. For example,
if $f(\vec a)= \mu n\ (g(\vec a,n)=0)$ where $g$ is recursive (and
$\forall \vec a\ \exists n\ (g(\vec a, n)=0)$), then from an algorithm
computing $g$ we can easily construct an algorithm computing $f(\vec a)$:
given input $\vec a$ we go into a loop computing $g(\vec a,n)$ for
successive values of $n$ until we find one where $g(\vec a,n)=0$, and
then we output that $n$. The other cases are likewise straightforward.
Suppose then that $f(n)$ is machine computable. Given input $n$, the
algorithm will, starting in a certain initial state, move through a
successive series of states (following the algorithm), finally
ending in a ``halting'' state where the output value is given. We can code
the successive states of the machine during the computation as a sequence
of integers $s=\langle s_0,s_1,\dots,s_k \rangle$ where $s_0$ is the
initial state (which is determined in a simple way from the
input value $n$), and each transition from $s_i$ to $s_{i+1}$
is a valid step according to the algorithm. Finally we require the
last integer $s_k$ to be a halting state of the machine, which
encodes in some simple way the output value. Thus, we may write
\begin{equation*}
\begin{split}
f(n)= h( \mu s\ & [s \in \seq \wedge \text{ ``} s_0 \text{ codes
an initial state corresponding to input } n\text{''} \\ &
\wedge \forall i < \lh(s)-1\
\text{``} (s_i,s_{i+1}) \text{ is a valid transition'' } \\ & \wedge
\text{``} s_{\lh(s)-1} \text{ codes a halting state''} ])
\end{split}
\end{equation*}
where $h$ is a simple (primitive recursive) function which recovers the output
value from the final halting state. Note the essential use of the
minimalization operator here; we cannot replace this use by bounded
quantification. On the other hand, everything inside the square brackets is
primitive recursive. This shows that every recursive function can be defined
with only one use of the minimalization operator.
\end{proof}
\begin{cor}
Every recursive relation is $\lD^0_1$ (we will see the converse below).
\end{cor}
\begin{proof}
The proof above shows that if $R$ is recursive then it may be written in
the form $R(n) \leftrightarrow \exists s\ S(n,s)$, where in fact
$S \in \lD^0_0$ (all quantifiers in the definition of $S$ are bounded by
$n$).
\end{proof}
Finally in this section we mention some of the properties of the
pointclasses defined earlier. First, we have the following closure properties.
\begin{thm}
For $n \geq 1$ the $\lS^0_n$ sets are closed under finite unions and
intersections, existential number quantification, bounded universal
number quantification (i.e., $\forall n \leq m$), and recursive substitution
(i.e., if $f$ is total recursive and $R \in \lS^0_n$, then so is
$R'(n) \leftrightarrow R(f(n))$).
Similarly, the $\lP^0_n$ sets are closed under finite unions and
intersections, universal number quantification, bounded existential
number quantification, and recursive substitution.
\end{thm}
\begin{proof}
We consider the case $\lS^0_n$. Closure under existential number
quantification is obvious. Consider a bounded universal number
quantification, say $B(n) \leftrightarrow \forall m \leq n \ A(n,m)$
where $A \in \lS^0_n$. Thus, $B(n) \leftrightarrow
\forall m \leq n\ \exists k\ C(n,m,k)$, where $C \in \lP^0_{n-1}$.
Using our coding functions we can then write
$B(n) \leftrightarrow \exists l\ \forall m \leq n\
C(n,m,(l)_m)$. By induction this shows $B$ is $\lS^0_n$.
The finite union and intersection cases are easy. Closure under recursive
substitution follows from the fact that a recursive substitution into
a $\lD^0_0$ relation results in a recursive relation (from the closure
properties of recursive relations), and the fact that a recursive
relation is $\lD^0_1$.
\end{proof}
\begin{thm}
A relation $R \subseteq \omega$ is recursive iff $R \in \lD^0_1$.
\end{thm}
\begin{proof}
We have already shown that any recursive relation is $\lD^0_1$.
Suppose now that $R \in \lD^0_1$. Say $R(n) \leftrightarrow
\exists m\ P(n,m)$, and $\neg R(n) \leftrightarrow
\exists m\ Q(n,m)$ where $P,Q$ are recursive (since both $R$ and its
complement are $\lS^0_1$ by definition of $R$ being $\lD^0_1$).
Let $f(n)= \mu m\ [P(n,m) \vee Q(n,m)]$, so $f$ is recursive.
Then $R(n) \leftrightarrow P(n,f(n))$, so $R$ is recursive.
\end{proof}
\section{Representability in Arithmetic} \label{secrep}
An important point in the proof of the incompleteness theorem is that all
recursive functions and relations are ``representable'' in arithmetic, and even
in the finite subsystem $\fa$ defined in \S\ref{secarith}. We define this
notion here and prove the representability of the recursive functions and
relations. The proof of the incompleteness theorem itself is given in the
next section.
\begin{defn}
We say a relation $R\subseteq \omega$ is representable in
$\fa$ if there is a formula
$\phi(x)$ (in the language of number theory) such that
for all $n \in \omega$, if $R(n)$ then $\fa \proves \phi(S^n(\bz))$, and
if $\neg R(n)$ then $\fa \proves \neg \phi(S^n(\bz))$.
We say a (total) function $f \colon \omega \to \omega$ is representable iff
its graph $G_f$ is. That is, there is a formula $\phi(x,y)$ such that
if $f(n)=m$ then $\fa \proves \phi(S^n(\bz), S^m(\bz))$, and if
$f(n) \neq m$ then $\fa \proves \neg \phi(S^n(\bz), S^m(\bz))$.
\end{defn}
\begin{exer}
Show that every representable relation or function is recursive.
\end{exer}
An important technical point is that representability of functions
coincides with a seemingly stronger concept which we now define.
\begin{defn}
A (total) function $f \colon \omega \to \omega$ is \emph{strongly
representable} if there is a formula $\phi(x,y)$ such that for all $n$,
$\fa \proves \phi(S^n(\bz), S^{f(n)}(\bz))$ and also
$\fa \proves [ \forall z\ (\phi(S^n(\bz),z) \rightarrow z \approx
S^{f(n)}(\bz))]$.
\end{defn}
Clearly strong representability implies representability.
We show that the converse holds, but first a simple technical
lemma.
\begin{lem} \label{leminit}
For any $n \in \omega$, $\fa \proves \forall z\ (z \leq S^n(\bz) \rightarrow
z \approx \bz \vee z \approx S(\bz) \vee \dots \vee
z \approx S^{n}(\bz))$.
\end{lem}
\begin{proof}
By induction on $n$. The result holds for $n=0$ since
$\fa \proves (z \leq \bz \rightarrow z \approx \bz)$
as $\fa \proves \neg (z < \bz)$. Assume the result holds for $n$ and
assume $z \leq S^{n+1}(\bz)$. If $z < S^{n+1}(\bz)=S(S^n(\bz))$, then
$\fa$ proves that $z \leq S^n(\bz)$, in which case by induction
$\fa$ proves $z\approx \bz \vee \dots \vee z\approx S^n(\bz)$. Thus,
$\fa \proves z\approx \bz \vee \dots \vee z \approx S^{n+1}(\bz)$.
\end{proof}
\begin{lem} \label{lemstr}
If $f$ is representable, then it is strongly representable.
\end{lem}
\begin{proof}
Suppose $\phi(x,y)$ represents $f \colon \omega \to \omega$.
Define $\psi(x,y)=[ \phi(x,y) \wedge \forall w < y\ \neg \phi(x,w)]$.
We claim that $\psi$ strongly represents $f$. Fix $n \in \omega$,
and let $m=f(n)$. By assumption, $\fa \proves \phi(S^n(\bz),S^m(\bz))$.
We must show that $\fa \proves \forall w < S^m(\bz)\ \neg \phi(S^n(\bz),w)$.
Work within $\fa$, and assume $w < S^m(\bz)$. From lemma~\ref{leminit}
we can deduce $(w \approx \bz \vee w \approx S(\bz) \vee \dots
\vee w \approx S^{m-1}(\bz))$. Since $\phi$ represents $f$ we have that
$\fa \proves \neg \phi(S^n(\bz),\bz), \dots,$
$\fa \proves \neg \phi(S^n(\bz),S^{m-1}(\bz))$. From these two statements
it follows that $\fa \proves \forall w < S^m(\bz)\ \neg \phi(S^n(\bz),w)$.
Thus, $\fa \proves \psi(S^n(\bz),S^m(\bz))$.
Working within $\fa$, assume now $\psi(S^n(\bz),z)$, so
$\forall w < z\ \neg \phi(S^n(\bz), w)$. Since $\fa \proves
\phi(S^n(\bz),S^m(\bz))$, we may deduce that
$z \leq S^m(\bz)$ (we use the fact that $\fa \proves
(z< S^m(\bz) \vee z \approx S^m(\bz) \vee z> S^m(\bz))$). So we may deduce
$z \approx \bz \vee \cdots \vee z \approx S^m(\bz)$. Since
$\fa \proves \neg \phi(S^n(\bz),\bz), \dots, $ $\fa \proves
\neg \phi(S^n(\bz), S^{m-1}(\bz))$, we may deduce $z \approx
S^m(\bz)$.
\end{proof}
The next theorem is the result we need on representability.
\begin{thm}
Every recursive relation and function is representable in $\fa$.
\end{thm}
\begin{lem}
Let $t$ be a \emph{closed term}, that is, a term containing no free
variables. Then there is an $n \in \omega$ such that
$\fa \proves t \approx S^n(\bz)$.
\end{lem}
\begin{proof}
By induction on the term $t$. If $t=\bz$ this is trivial. If $t=S(u)$
this is also trivial as $\fa \proves u \approx S^n(\bz)$ for some
$n$ by induction, and $S(S^n(\bz))=S^{n+1}(\bz)$.
If $t= u+v$, then by induction it suffices to show that
$\fa \proves S^n(\bz) + S^m(\bz) \approx S^{n+m}(\bz)$.
This, in turn, is proved by induction on $n$, say, with the inductive
step given by $\fa \proves
S(S^{n-1}(\bz))+S^m(\bz)\approx S(S^{n-1}(\bz)+S^m(\bz)) \approx
S(S^{n+m-1}(\bz))=S^{n+m}(\bz)$.
The result for terms of the form $t=u \cdot v$ follows similarly from
$\fa \proves S^n(\bz) \cdot S^m(\bz)\approx S^{n \cdot m}(\bz)$
which is proved by induction, using the result for addition.
The result for exponentiation is similar.
\end{proof}
\begin{lem} \label{lemqf}
If $\phi$ is quantifier free then the relation $R$ defined by $\phi$
is representable in $\fa$.
\end{lem}
\begin{proof}
It suffices to prove this for atomic formulas. For this it suffices to show that
if $t,u$ are closed terms then either $\fa \proves t \approx u$ or
$\fa \proves \neg(t \approx u)$, and likewise for the formula $t < u$.
By the previous lemma we may assume $t=S^n(\bz)$ and $u=S^m(\bz)$.
For the equality case, if $n=m$ then trivially
$\fa \proves S^n(\bz) \approx S^m(\bz)$, while if $n \neq m$ an easy
induction shows that $\fa \proves \neg(S^n(\bz) \approx S^m(\bz))$.
For the order case, we first show by induction for $m>n$ that
$\fa \proves S^n(\bz) < S^m(\bz)$. For $m=n+1$,
$\fa \proves S^n(\bz) < S^{n+1}(\bz)=S(S^n(\bz))$ follows from the axiom of $\fa$
$\forall x\ \forall y\ (x< S(y) \leftrightarrow x \leq y)$ which implies
$\forall x\ (x< S(x))$. Assuming the result is true for $m$,
$\fa \proves (S^n(\bz) < S^m(\bz))$, and the same axiom then shows
that $\fa \proves (S^n(\bz) < S(S^m(\bz))=S^{m+1}(\bz))$.
Assume now that $n \geq m$. Working in $\fa$, assume towards a
contradiction that $S^n(\bz) < S^m(\bz)$. From lemma~\ref{leminit}
we have $\fa \proves S^n(\bz)\approx \bz \vee \dots \vee
S^n(\bz) \approx S^{m-1}(\bz)$. However, from the equality case we know
that $\fa \proves \neg(S^n(\bz) \approx \bz)$, $\dots,
\fa \proves \neg( S^n(\bz) \approx S^{m-1}(\bz))$. This is a contradiction.
\end{proof}
\begin{lem} \label{lemd0}
If $\phi \in \lD_0$ then the relation $R$ defined by $\phi$
is representable in $\fa$.
\end{lem}
\begin{proof}
It suffices to show that if $R(x,y)$ is representable by $\phi(x,y)$,
then the relation $Q(n) \leftrightarrow \exists m \leq n \ R(n,m)$ is
representable (note we write $Q$ for this relation, to avoid a clash
with the successor symbol $S$).
Let $\psi(x)= \exists y \ ( y \leq x \wedge \phi(x,y))$.
Let $n \in \omega$, and first suppose $Q(n)$. Thus there is an
$m \leq n$ such that $R(n,m)$. Thus, $\fa \proves
\phi(S^n(\bz), S^m(\bz))$. From lemma~\ref{lemqf}, $\fa \proves
S^m(\bz) \leq S^n(\bz)$. Hence, $\fa \proves
\exists y\ (y \leq S^n(\bz) \wedge \phi(S^n(\bz),y))$, that is,
$\fa \proves \psi(S^n(\bz))$.
Assume now that $n \in \omega$ and $\neg Q(n)$, hence
for all $m \leq n$ we have $\neg R(n,m)$. From lemma~\ref{leminit},
$\fa \proves \forall y\ (y \leq S^n(\bz) \rightarrow
y \approx \bz \vee \cdots \vee y \approx S^n(\bz))$.
Since $\phi$ represents $R$ we also have
$\fa \proves \neg \phi(S^n(\bz), \bz)$, $\dots,
\fa \proves \neg \phi(S^n(\bz), S^{n}(\bz))$. These two statements logically
imply $\forall y\ (y \leq S^n(\bz) \rightarrow \neg \phi(S^n(\bz),y))$.
Thus, $\fa \proves \neg \exists y\ (y \leq S^n(\bz) \wedge \phi(S^n(\bz),y))$,
that is, $\fa \proves \neg \psi(S^n(\bz))$ and we are
done.
\end{proof}
\begin{cor}
For any $k \in \omega$, the $k$-ary coding function
$(a_0,\dots,a_{k-1}) \to \langle a_0,\dots,a_{k-1} \rangle$
is representable. Also for any $j<k$, the decoding function
$a \mapsto (a)_j$ is representable.
\end{cor}
% NOTE: a span of the source text appears to be missing here (the statement
% of the proof of the representability theorem up to its minimization case;
% the surviving fragment below resumes mid-argument, where $f(n)$ is the
% least $w_0$ satisfying the condition represented by $\chi$).
\begin{proof}
Working in $\fa$, we may deduce
$(w < S^{w_0}(\bz) \vee w \approx S^{w_0}(\bz) \vee w > S^{w_0}(\bz))$. The latter case is
clearly a contradiction. From lemma~\ref{leminit} and lemma~\ref{lemd0}
$\fa \proves \forall w< S^{w_0}(\bz)\
\neg \chi(S^n(\bz),S^{\vec a}(\bz), w)$ which shows that the assumption
$w < S^{w_0}(\bz)$ also leads to a contradiction. Thus we may deduce
(from $\fa$ and $\tau( S^n(\bz), S^{\vec a} (\bz), S^r(\bz),w)$)
that $w \approx S^{w_0}(\bz)$. Thus we may deduce
$\tau( S^n(\bz), S^{\vec a} (\bz), S^r(\bz),S^{w_0}(\bz))$, which is
a contradiction since $\fa \proves
\neg \tau( S^n(\bz), S^{\vec a} (\bz), S^r(\bz),S^{w_0}(\bz))$ from
lemma~\ref{lemd0}.
\end{proof}
\section {Incompleteness} \label{secincomplete}
In this section we prove several versions of the G\"odel
incompleteness theorem. First we define a coding of the formulas of
number theory into the integers. Fix a bijection $\pi$ between the
finitely many symbols of the language (including the logical symbols)
excluding the (infinitely many) variable symbols and the
set $\{ 0, 1, \dots, n_0-1\}$. Extend $\pi$ to the variables by
$\pi(x_k)=n_0+k$. Then $\pi$ is a bijection between the logical symbols and
the integers, and the relation $R(a,b) \leftrightarrow
\pi(x_a)=b$ is clearly recursive.
\begin{defn}
If $\phi=s_0 s_1,\dots,s_k$ is a string of symbols in the language of number
theory, then the \emph{G\"odel code} of $\phi$ is defined by
$\#(\phi)= \langle \pi(s_0),\dots,\pi(s_k)\rangle \in \omega$.
\end{defn}
We will use in the following arguments the fact that certain relations
and (total) functions on the integers are recursive. In fact, all of the
relations and functions we need are primitive recursive. These
facts can be easily checked from the closure properties of
recursive functions of \S~\ref{secrec}; we leave the details
to the reader.
The next result is the key technical lemma for the incompleteness results.
It says, in effect, that we may construct self-referential formulas.
The formulas attempt to refer to themselves by referring to the
G\"odel codes of themselves.
\begin{lem} \label{lemfp}
Let $\theta(x)$ be a formula in the language of number theory
with one free variable $x$. Then there is a sentence $\sigma$
(in the language of number theory) such that
$\fa \proves (\sigma \leftrightarrow \theta( S^{\# \sigma}(\bz)))$.
\end{lem}
\begin{proof}
Let $f \colon \omega \to \omega$ be the primitive recursive
function defined as follows. If $n$ is the code of a
formula $\psi$ with one free variable, then $f(n)$ is the
code of the sentence $\psi(S^{\# \psi }(\bz))$. Otherwise,
let $f(n)=0$. Let $\rho(x,y)$ strongly represent $f$ in $\fa$.
Let $\tau$ be the formula
$$
\tau= \exists x_1 \ ( \rho(x_0,x_1) \wedge \theta(x_1)).
$$
Note that $\tau$ has one free variable, $x_0$.
Let $n= \# \tau$. Let $\sigma= \tau ( S^{\# \tau}(\bz))$.
Let $m=f(n)$, which is the code for
$\tau ( S^{\# \tau}(\bz))=\sigma$.
We show that $\sigma$ works. Working within $\fa$, first assume
$\sigma$. Thus,
$\exists x_1 \ ( \rho(S^{\# \tau}(\bz),x_1) \wedge \theta(x_1))$.
By strong representability,
$\fa \proves \forall x_1\ (\rho(S^{\# \tau}(\bz),x_1) \rightarrow
x_1 \approx S^m(\bz))$.
These two sentences logically imply
$\theta(S^m(\bz))$, that is, $\theta(S^{\# \sigma}(\bz))$.
Assume next
$\theta( S^{\# \sigma}(\bz))$, that is,
$\theta( S^{m}(\bz))$. Since $\fa \proves \rho(S^n(\bz), S^m(\bz))$
by representability, we may deduce
$\exists x_1 \ ( \rho(S^n(\bz),x_1) \wedge \theta(x_1))$.
Thus, we may deduce $\tau(S^n(\bz))$, that is, $\sigma$.
\end{proof}
We now state the first version of the incompleteness theorem.
We call a set of sentences $T$ recursive if $\{ \# \phi \colon
\phi \in T\}$ is recursive. The reader will note
that the sentence $\sigma$ constructed in the following
proof is a formalization of the statement
``this sentence is not provable.''
\begin{thm} \label{godel1}
Let $T$ be a consistent, recursive set of sentences in the language of number
theory which contains $\fa$. Then $T$ is incomplete, that is,
there is a sentence $\sigma$ such that $T \nvdash \sigma$ and
$T \nvdash \neg \sigma$.
\end{thm}
\begin{proof}
Towards a contradiction assume that $T$ is complete. Let
$R =\{ \# \phi \colon T \proves \phi \}$. We claim that $R$
is recursive. This is because we may check if $n \in R$ by
enumerating all possible deductions from $T$ and checking
at each step if it is a deduction from $T$ of either
$\phi$ (the formula with code $n$) or a deduction of
$\neg \phi$. We output a $1$ if for the least such deduction
we encounter it is a deduction of $\phi$.
Checking if an integer codes a valid deduction
from $T$ is recursive, using the assumption that $T$ is
recursive. This algorithm will always terminate
by our completeness assumption. The answer will be correct
as $T$ is consistent.
Let $\theta$ represent $\neg R$ in $\fa$. Let $\sigma$ be the sentence of
lemma~\ref{lemfp} applied to $\theta$. Thus, $\fa$, and hence $T$
proves the statement
$$
\sigma \leftrightarrow \theta( S^{\# \sigma}(\bz)).
$$
Let $n=\# \sigma$. If $R(n)$, then $\fa \proves
\neg \theta (S^n (\bz))$, and so $T \proves
\neg \sigma$. Thus, $\neg R(n)$, a contradiction.
If $\neg R(n)$, then $\fa \proves \theta (S^n(\bz))$,
and so $T \proves \sigma$. Hence $R(n)$, a contradiction.
\end{proof}
Theorem~\ref{godel1} was proved by contradiction, and thus does
not actually produce a concrete sentence $\sigma$ which is
independent of $T$. With a little extra argument we can do this.
First we give the argument due to G\"odel which shows this under
a slightly stronger hypothesis.
\begin{defn}
We say $T$ is $\omega$-consistent if there is no formula $\phi(x)$
such that for all $n \in \omega$, $T \proves \neg \phi(S^n(\bz))$
but $T \proves \exists x\ \phi(x)$.
\end{defn}
Of course, an $\omega$-consistent theory is consistent, but the
converse is not true. An $\omega$-inconsistent theory is one
that has no standard model.
For $T$ a recursive set of sentences in the language of number theory, let
$R_T$ be the relation defined by $R_T(a,b)$ iff
$b$ is the code of a deduction from $T$ of the formula with code
a$. $R_T$ is clearly recursive. Let $\theta(x,y)$
represent $R_T$ in $\fa$. Let $\tau(x)= \neg \exists y\ \theta(x,y)$,
and let $\sigma_1=\sigma_1(T)$ be the
sentence such that $\fa \proves
\sigma_1(T) \leftrightarrow \tau ( S^{\# \sigma_1(T)}(\bz))$
from lemma~\ref{lemfp}.
\begin{thm} \label{godel2}
Let $T$ be an $\omega$-consistent,
recursive set of sentences in the language of number
theory which contains $\fa$. Then $T \nvdash \sigma_1$ and
$T \nvdash \neg \sigma_1$.
\end{thm}
\begin{proof}
The proof is similar to theorem~\ref{godel1}.
Assume first that $T \proves \sigma_1$. Let $n=
\# \sigma_1$. Let $m$ code a deduction of $\sigma_1$ from $T$.
Thus, $T \proves \theta(S^n(\bz),S^m(\bz))$ (in the notation above).
This logically implies $\neg \tau (S^n(\bz))$, and thus
$T \proves \neg \sigma_1$. This contradicts the assumption
that $T$ is consistent (note: this case only used the
consistency of $T$).
Assume next that $T \proves \neg \sigma_1$.
Thus, $T \proves \neg \tau ( S^{n}(\bz))$, and so
$T \proves \exists y\ \theta(S^n(\bz),y)$.
Since $T \nvdash \sigma_1$ from the previous paragraph, we
know that for all $m \in \omega$ that
$\neg R_T(n,m)$, and hence $T \proves \neg \theta(S^n(\bz),S^m(\bz))$.
This contradicts the $\omega$-consistency of $T$.
\end{proof}
The extra hypothesis of $\omega$-consistency, though minor, is
slightly annoying. An improvement of theorem~\ref{godel2},
due to Rosser, shows that it is actually unnecessary.
Let $R_T(a,b)$ and $\theta(x,y)$ be as above. Let $g \colon
\omega \to \omega$ be a recursive function such that if
$a$ is the code of $\phi$, then $g(a)$ is the code of $\neg \phi$.
Let $\rho(x,y)$ strongly represent $g$.
Let $\tau(x)$ be the formula
$$
\tau= \forall y\ ( \theta(x,y) \rightarrow \exists z < y\ \exists w\
(\rho(x,w) \wedge \theta(w,z)))
$$
Let $\sigma_2=\sigma_2(T)$ be the sentence from lemma~\ref{lemfp}
for this $\tau$.
\begin{thm} \label{rosser}
Let $T$ be a consistent,
recursive set of sentences in the language of number
theory which contains $\fa$. Then $T \nvdash \sigma_2$ and
$T \nvdash \neg \sigma_2$.
\end{thm}
\begin{proof}
Let $n = \# \sigma_2$.
Assume first $T \vdash \sigma_2$. Let $m$ code a deduction of
$\sigma_2$ from $T$. So, $T \proves \theta(S^n(\bz),S^m(\bz))$.
Also, $T \proves
\forall y\ ( \theta(S^n(\bz),y) \rightarrow \exists z < y\ \exists w\
(\rho(S^n(\bz),w) \wedge \theta(w,z)))$.
These statements logically imply
$\exists z < S^m(\bz)\ \exists w\
(\rho(S^n(\bz),w) \wedge \theta(w,z))$. We violate the consistency
of $T$ by showing $T \proves \forall z < S^m(\bz)\
\forall w\ (\rho(S^n(\bz),w) \rightarrow \neg \theta(w,z))$.
From lemma~\ref{leminit} it is enough to fix $k < m$ and show that
$T \proves \forall w\ (\rho(S^n(\bz),w) \rightarrow \neg\theta(w,S^k(\bz)))$.
Since $\rho$ strongly represents $g$, it is enough to show that
$T \proves \neg\theta(S^{g(n)}(\bz),S^k(\bz))$. This holds since $T$ is
consistent and $T \proves \sigma_2$, so no $k$ codes a deduction of
$\neg \sigma_2$ from $T$; that is, $\neg R_T(g(n),k)$, and $\theta$
represents $R_T$.
Assume next that $T \proves \neg \sigma_2$, and let $m$ code a deduction
of $\neg \sigma_2$ from $T$, so that
$T \proves \theta(S^{g(n)}(\bz),S^m(\bz))$ and, by representability of $g$,
$T \proves \rho(S^n(\bz),S^{g(n)}(\bz))$. From $T \proves \neg \sigma_2$
we get $T \proves \exists y\ (\theta(S^n(\bz),y) \wedge \forall z < y\
\forall w\ (\rho(S^n(\bz),w) \rightarrow \neg \theta(w,z)))$.
Working in $T$, fix such a $y$. If $y > S^m(\bz)$, then taking
$z=S^m(\bz)$ and $w=S^{g(n)}(\bz)$ gives a contradiction, so we may assume
$y \leq S^m(\bz)$ (using the fact that $\fa \proves
y \leq S^m(\bz) \vee y > S^m(\bz)$). From lemma~\ref{leminit}
it is enough to show, for each $k \leq m$, that
$T''=T \cup \{ ( \theta(S^n(\bz),S^k(\bz))
\wedge \forall z < S^k(\bz)\ \forall w\
(\rho(S^n(\bz),w) \rightarrow \neg \theta(w,z))) \}$ is inconsistent.
This is clearly the case, however, since for all such $k$
we have $T \proves \neg \theta(S^n(\bz),S^k(\bz))$ since by consistency
$\neg R_T(n,k)$ holds (recall we are assuming $T \proves \neg \sigma_2$,
so $T \nvdash \sigma_2$ and no $k$ codes a deduction of $\sigma_2$ from $T$).
\end{proof}
Note that the sentences $\sigma_1$, $\sigma_2$ of the G\"odel
theorems are $\lP_1$ sentences in the language of number
theory. Thus, incompleteness arises for sentences having only
one unbounded number quantifier.
The incompleteness theorems as we stated them apply to theories in
the language of number theory, however it is not difficult to see that they
consequently apply to theories in which we can ``interpret''
the theory $\fa$. To make this precise, let $\sL$ denote the
language of number theory, and $\sL'$ a first-order language
(e.g., the language of set theory). Suppose we have formulas
$\alpha_\N$, $\alpha_+$, $\alpha_{\cdot}$, $\alpha_E$, $\alpha_<$,
$\alpha_S$, $\alpha_{\bz}$ of $\sL'$. $\alpha_\N$ is intended
to define a ``copy'' of $\N$, and the other formulas, $\alpha_+$ for
example, are intended to define the corresponding function, relation, or
constant symbol on this copy. Let $T'$ be a theory (set of sentences)
in $\sL'$, for example $T'$ might be the axioms of $\zfc$.
Suppose $T'$ proves that $\exists x\ \alpha_\N (x)$ (i.e., the
copy of $\N$ is non-empty) and also for each of the axioms $\psi$
of $\fa$, $T' \proves \psi'$ where $\psi'$ is the interpretation
of $\psi$ into $\sL'$ using the $\alpha$ formula in a natural way.
For example, an atomic formula of the form $x+(y \cdot z) \approx w$
is replaced by
$\exists z_1 \exists z_2\ \ (\alpha_\N(z_1) \wedge \alpha_\N(z_2)
\wedge \alpha_{\cdot}(y,z,z_1) \wedge \alpha_{+}(x,z_1,z_2)
\wedge z_2 \approx w)$. In this way, each formula $\psi$ of number
theory is replaced by a formula $\psi'$ of $\sL'$ such that if
$\fa \proves \psi$ then $T' \proves \psi'$.
Of course, $T'$ may prove more about its copy of $\N$ than does $\fa$,
for example, if $\sL'$ is the language of set theory and $\alpha_\N=
$``$x \in \omega$,'' (and the other $\alpha$ are defined in the usual
way these functions etc.\ are defined in set theory), then $\zfc$
proves much more about $\N$ than $\fa$ does, in particular $\zfc$
proves that all of the Peano axioms hold in $\N$.
At any rate, if $T'$ proves all of the $\psi'$ for $\psi \in \fa$, then
all of the proofs of the incompleteness results given above for $\sL$
may be carried over immediately for theories extending $T'$. Since $\fa$
is finite, it follows that there is a finite $T'$ which suffices to
prove all of the $\psi'$.
For example, let $\zfc'$ denote the finite subset of $\zfc$
which suffices to prove all of the $\psi'$ for $\psi \in \fa$.
We then have:
\begin{thm}
Let $T$ be a recursive, consistent theory in the language of set theory
which extends the finite fragment $\zfc'$.
Then $T$ is incomplete. Moreover, there is a $\lP_1$ sentence
$\sigma_2=\sigma_2(T)$ such that $T \nvdash \sigma_2$ and
$T \nvdash \neg \sigma_2$.
\end{thm}
\begin{proof}
Define $R(a,b)$ and $\theta(x,y)$ as in theorem~\ref{rosser}.
Let $\theta'(x,y)$ be the interpretation of $\theta$ into
the language of set theory. Thus, if $R(a,b)$ then $\fa \proves
\theta(S^a(\bz),S^b(\bz))$ and so $T \proves \theta'_{a,b}$ and likewise
for $\neg R(a,b)$, where $\theta'_{a,b}$ denotes the interpretation of
$\theta(S^a(\bz),S^b(\bz))$. The proof is now essentially identical to
theorem~\ref{rosser} using $\theta'$ in place of $\theta$.
\end{proof}
Lastly, we discuss the second G\"odel incompleteness theorem.
We now consider theories $T$ which may be in the language of number theory,
or set theory, etc., for which we have an interpretation of $\N$
as above. Let $R(b)$ iff $b$ codes a deduction from $T$ of
a logical contradiction, say $\exists x\ (x \napprox x)$.
Let $\theta(x)$ represent $R$ in $\fa$, and let
$\con_T$ be the sentence $\neg \exists x\ \theta(x)$. If $T$ is in
set theory, say, then we let $\con_T$ be the interpretation of this
sentence into the language of set theory. The second version of the incompleteness
theorem says that if $T$ is recursive, consistent, and sufficiently strong
(but now we need more than that $T$ contains $\fa$), then
$T \nvdash \con_T$. It is enough to have $T$ contain $\pa$
(even a smaller fragment of it, say $\lP_1$-induction), but we state the
result now for theories extending $\zfc$.
\begin{thm}
Let $T$ be a recursive, consistent theory in the language of
set theory extending $\zfc$. Then $T \nvdash \con_T$.
\end{thm}
\begin{proof}
Let $\sigma_1$ be the sentence from the first version of the
G\"odel incompleteness theorem, so $T \nvdash \sigma_1$ (recall
this direction only used the consistency of $T$).
The proof of this (theorem~\ref{godel2}) was presented in the metatheory.
That is, in the metatheory we showed that if $T$ is consistent, then
$T \nvdash \sigma_1$.
Closer examination
of the proof reveals that the only properties of the integers
used in the proof are theorems of $\pa$. Certainly, however,
they are all theorems of $\zfc$. Thus, this argument in the metatheory,
when formalized, becomes the statement that
$\zfc \proves (\con_T \rightarrow \phi)$, where $\phi$ is the
formalization of the statement ``there does not exist a proof of
$\sigma_1$ from $T$.'' However, this formalization
is just the statement $\tau(S^{\# \sigma_1}(\bz))$ (using the notation
of theorem~\ref{godel2}), and this is provably equivalent to $\sigma_1$ over $T$
(more precisely, the interpretations of these statements into
the language of set theory). Thus, $\zfc \proves (\con_T \rightarrow
\sigma_1)$. It follows that $\zfc \nvdash \con_T$ from
theorem~\ref{godel2}.
\end{proof}
\end{document}
% (end of file)