\documentclass{article}
\usepackage[a4paper]{geometry}
\usepackage{enumitem}
\usepackage{stmaryrd}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{proof}
\DeclareMathOperator{\lcm}{lcm}
\newcommand{\T}{\ensuremath{\mathtt{T}}}
\newcommand{\F}{\ensuremath{\mathtt{F}}}
\newcommand{\limp}{\Rightarrow}
\newcommand{\lonlyif}{\limp}
\newcommand{\liff}{\Leftrightarrow}
\newcommand{\limpby}{\Leftarrow}
\newcommand{\lif}{\limpby}
\newcommand{\mvar}[1]{\ensuremath{\mathtt{#1}}}
\newcommand{\var}[1]{\mvar{#1}}
\newcommand{\A}{\mvar{A}}
\newcommand{\B}{\mvar{B}}
\newcommand{\C}{\mvar{C}}
\newcommand{\interp}[2]{{\llbracket#2\rrbracket_\mathcal{#1}}}
\newcommand{\inM}[1]{\interp{M}{#1}}
\newcommand{\tableindent}{\hspace{3em}}
\newcommand{\TODO}[1]{\textbf{TODO: }#1}
\begin{document}
\title{R06 Exercises}
\author{Ramana Kumar (rk436)}
\maketitle
\section*{EXERCISES 2}
\subsection*{Exercise 2.1}
\begin{enumerate}[label=(\roman{*})]
\item
Consider truth tables for $\lnot\B\limp\lnot\A$ and for $\A\limp\B$.
\begin{table}[h]
\tableindent
\begin{tabular}{cc|ccc}
$\A$&$\B$&$\lnot\B$&$\lnot\A$&$\lnot\B\limp\lnot\A$\\\hline
\T&\T&\F&\F&\T\\
\T&\F&\T&\F&\F\\
\F&\T&\F&\T&\T\\
\F&\F&\T&\T&\T\\
\end{tabular}
\tableindent
\begin{tabular}{cc|c}
$\A$&$\B$&$\A\limp\B$\\\hline
\T&\T&\T\\
\T&\F&\F\\
\F&\T&\T\\
\F&\F&\T\\
\end{tabular}
\end{table}
The truth values in the left two and rightmost columns are the same in both tables.
It follows that $\lnot\B\limp\lnot\A$ and $\A\limp\B$ are logically equivalent.
The truth table rule for $\liff$ assigns \T{} when the inputs are the same and \F{} otherwise.
Thus we have the following truth table.
\begin{table}[h]
\tableindent
\begin{tabular}{cc|ccc}
$\A$&$\B$&$\lnot\B\limp\lnot\A$&$\A\limp\B$&$(\lnot\B\limp\lnot\A)\liff(\A\limp\B)$\\\hline
\T&\T&\T&\T&\T\\
\T&\F&\F&\F&\T\\
\F&\T&\T&\T&\T\\
\F&\F&\T&\T&\T\\
\end{tabular}
\end{table}
The rightmost column has \T{} in every row, so $(\lnot\B\limp\lnot\A)\liff(\A\limp\B)$ is a tautology.
Indeed, it is clear that connecting any logically equivalent propositions by $\liff$ will result in a tautology.
\item
We prove the equality as follows.
\begin{align*}
\inM{\lnot\B\limp\lnot\A}&=\inM{\lnot\lnot\B\lor\lnot\A}&\text{definition of $\limp$}\\
&=\inM{\lnot\lnot\B}\cup\inM{\lnot\A}&\text{interpretation of $\lor$}\\
&=\inM{\lnot\A}\cup\inM{\lnot\lnot\B}&\text{commutativity of $\cup$}\\
&=\inM{\lnot\A}\cup\inM{\lnot\B}^c&\text{interpretation of $\lnot$}\\
&=\inM{\lnot\A}\cup\left(\inM{\B}^c\right)^c&\text{interpretation of $\lnot$}\\
&=\inM{\lnot\A}\cup\inM{\B}&\text{complement identity}\\
&=\inM{\lnot\A\lor\B}&\text{interpretation of $\lor$}\\
&=\inM{\A\limp\B}&\text{definition of $\limp$}\\
\end{align*}
The equality above, considered as subset inclusion in both directions, can be rephrased as $\lnot\B\limp\lnot\A$ entails $\A\limp\B$ in $\mathcal{M}$, and $\A\limp\B$ entails $\lnot\B\limp\lnot\A$ in $\mathcal{M}$.
Since $\mathcal{M}$ is arbitrary, we deduce both $\lnot\B\limp\lnot\A\models\A\limp\B$ and $\A\limp\B\models\lnot\B\limp\lnot\A$, which together mean $(\lnot\B\limp\lnot\A)=(\A\limp\B)$.
By Corollary 2.12 we conclude $\models\left[(\lnot\B\limp\lnot\A)\liff(\A\limp\B)\right]$.
\end{enumerate}
\subsection*{Exercise 2.3}
\paragraph{Base cases}
In each case, the result follows from $1\leq{2}$:
\begin{align*}
1=|\var{a}|=|tr(\var{a})|&\leq{3}|\var{a}|-1=3(1)-1=2\\
1=|\T|=|tr(\T)|&\leq{3}|\T|-1=3(1)-1=2\\
1=|\F|=|tr(\F)|&\leq{3}|\F|-1=3(1)-1=2\\
\end{align*}
\paragraph{Inductive cases}
\begin{itemize}
\item
When $\A=\B\land\C$, our inductive hypotheses are $|tr(\B)|\leq{3}|\B|-1$ and $|tr(\C)|\leq{3}|\C|-1$.
We reason as follows.
\begin{align*}
|tr(\B\land\C)|&=|tr(\B)\land{tr(\C)}|&\text{definition of $tr$}\\
&=|tr(\B)|+|tr(\C)|+1&\text{definition of $|\cdot|$}\\
&\leq 3|\B|-1+3|\C|-1+1&\text{inductive hypotheses}\\
&=3\left(|\B|+|\C|\right)-1\\
&\leq 3\left(|\B|+|\C|+1\right)-1\\
&=3|\B\land\C|-1&\text{definition of $|\cdot|$}\\
\end{align*}
\item
When $\A=\lnot\B$, our inductive hypothesis is $|tr(\B)|\leq{3}|\B|-1$.
We reason as follows.
\begin{align*}
|tr(\lnot\B)|&=|\lnot{tr(\B)}|&\text{definition of $tr$}\\
&=|tr(\B)|+1&\text{definition of $|\cdot|$}\\
&\leq{3}|\B|-1+1&\text{inductive hypothesis}\\
&\leq{3}(|\B|+1)-1\\
&=3|\lnot\B|-1&\text{definition of $|\cdot|$}\\
\end{align*}
\item
Finally, when $\A=\B\lor\C$, our inductive hypotheses are $|tr(\B)|\leq{3}|\B|-1$ and $|tr(\C)|\leq{3}|\C|-1$, and we reason as follows.
\begin{align*}
|tr(\B\lor\C)|&=|\lnot\left(\lnot{tr(\B)}\land\lnot{tr(\C)}\right)|&\text{definition of $tr$}\\
&=|\lnot{tr(\B)}\land\lnot{tr(\C)}|+1&\text{definition of $|\cdot|$}\\
&=|\lnot{tr(\B)}|+|\lnot{tr(\C)}|+1+1&\text{definition of $|\cdot|$}\\
&=|tr(\B)|+1+|tr(\C)|+1+1+1&\text{definition of $|\cdot|$}\\
&\leq{3}|\B|-1+1+3|\C|-1+1+1+1&\text{inductive hypotheses}\\
&=3|\B|+3|\C|+2\\
&=3(|\B|+|\C|+1)-1\\
&=3|\B\lor\C|-1&\text{definition of $|\cdot|$}\\
\end{align*}
\end{itemize}
\section*{EXERCISES 3}
\subsection*{Exercise 3.1}
($\limpby$)
Assuming $a=a'$ and $b=b'$, the equality $\{\{a\},\{a,b\}\}=\{\{a'\},\{a',b'\}\}$ follows immediately.
($\limp$)
Assume $\{\{a\},\{a,b\}\}=\{\{a'\},\{a',b'\}\}$.
\begin{itemize}
\item
If $a=b$, we have $\{\{a\},\{a,b\}\}=\{\{a\},\{a,a\}\}=\{\{a\},\{a\}\}=\{\{a\}\}=\{\{a'\},\{a',b'\}\}$.
Therefore $\{\{a'\},\{a',b'\}\}\subseteq\{\{a\}\}$, which means, in particular, $\{a',b'\}\in\{\{a\}\}$.
Since there is only one element on the right, we must have $\{a',b'\}=\{a\}$, which implies $\{a',b'\}\subseteq\{a\}$.
Again since there is only one element on the right, we must have both $a'=a$ and $b'=a$ (and, by assumption, $a=b$, so $b'=b$).
\item
If $a\neq{b}$, then $\{\{a\},\{a,b\}\}$ contains exactly two sets, one, $\{a\}$, of size $1$, and one, $\{a,b\}$, of size $2$.
We must have $a'\neq{b'}$, since otherwise $\{\{a'\},\{a',b'\}\}$ would contain only one set.
Therefore $\{a',b'\}$ has size 2 and is the only set of size $2$ in $\{\{a'\},\{a',b'\}\}$.
It follows that $\{a',b'\}=\{a,b\}$, and $\{a'\}=\{a\}$.
From the latter equality we can deduce $a'=a$, and therefore $\{a,b'\}=\{a,b\}$.
Now consider intersection with $\{b\}$: on the right we have $\{a,b\}\cap\{b\}=\{b\}$, and on the left we must have either $\{a,b'\}\cap\{b\}=\{b'\}$ or $\{a,b'\}\cap\{b\}=\emptyset$.
Since $\{b\}$ is clearly not empty, we must have $\{b\}=\{a,b\}\cap\{b\}=\{a,b'\}\cap\{b\}=\{b'\}$, and thus $b=b'$.
\end{itemize}
\subsection*{Exercise 3.3}
\begin{enumerate}[label=(\roman{*}),ref=(\roman{*})]
\item\label{refl}
By definition, whenever $p\,\mathsf{id}_P\,q$, then in fact $p=q$.
We prove the two bisimulation conditions for $p\,\mathsf{id}_P\,q$ as follows.
\begin{itemize}
\item
Given any $p'$ with $p\longrightarrow{p'}$, we immediately have a $q'$, namely $p'$, such that both $q[=p]\longrightarrow[p'=]q'$ and $p'[=q']\,\mathsf{id}_P\,q'$.
\item
Symmetrically, given any $q'$ with $q\longrightarrow{q'}$, we immediately have a $p'$, namely $q'$, such that $p\longrightarrow{p'}$ and $p'\,\mathsf{id}_P\,{q'}$.
\end{itemize}
\item\label{sym}
Assume $R$ is a bisimulation on $P$.
Suppose $p\,R^{-1}\,q$.
By definition of the converse relation, this means $q\,R\,p$.
Since $R$ is a bisimulation, we infer
\begin{itemize}
\item
$\forall{p'}\in{P}.\;q\longrightarrow{p'}\limp\exists{q'}\in{P}.\;p\longrightarrow{q'}\land{p'}\,R\,q'$, and
\item
$\forall{q'}\in{P}.\;p\longrightarrow{q'}\limp\exists{p'}\in{P}.\;q\longrightarrow{p'}\land{p'}\,R\,q'$.
\end{itemize}
We prove the two bisimulation conditions for $p\,R^{-1}\,q$ as follows.
\begin{itemize}
\item
Given any $p'$ with $p\longrightarrow{p'}$, we deduce from the second condition for $q\,R\,p$ that there exists a $q'$ such that $q\longrightarrow{q'}$ and $q'\,R\,p'$.
Observing that $q'\,R\,p'$ implies $p'\,R^{-1}\,q'$, we satisfy the first condition for $p\,R^{-1}\,q$.
\item
Given any $q'$ with $q\longrightarrow{q'}$, we deduce from the first condition for $q\,R\,p$ that there exists a $p'$ such that $p\longrightarrow{p'}$ and $q'\,R\,p'$.
Observing that $q'\,R\,p'$ implies $p'\,R^{-1}\,q'$, we satisfy the second condition for $p\,R^{-1}\,q$.
\end{itemize}
\item\label{trans}
Assume $R$ and $S$ are bisimulations on $P$.
Suppose $p\,(S\circ{R})\,q$.
By definition of composition, this means there exists an $m$ such that $p\,R\,m$ and $m\,S\,q$.
Since $R$ and $S$ are bisimulations, we infer
\begin{itemize}
\item
$\forall{p'}\in{P}.\;p\longrightarrow{p'}\limp\exists{q'}\in{P}.\;m\longrightarrow{q'}\land{p'}\,R\,q'$,
\item
$\forall{q'}\in{P}.\;m\longrightarrow{q'}\limp\exists{p'}\in{P}.\;p\longrightarrow{p'}\land{p'}\,R\,q'$,
\end{itemize}
\begin{itemize}
\item
$\forall{p'}\in{P}.\;m\longrightarrow{p'}\limp\exists{q'}\in{P}.\;q\longrightarrow{q'}\land{p'}\,S\,q'$, and
\item
$\forall{q'}\in{P}.\;q\longrightarrow{q'}\limp\exists{p'}\in{P}.\;m\longrightarrow{p'}\land{p'}\,S\,q'$.
\end{itemize}
We prove the bisimulation conditions for $p\,(S\circ{R})\,q$ as follows.
\begin{itemize}
\item
Given any $p'$ with $p\longrightarrow{p'}$, we deduce from the first condition for $p\,R\,m$ that there exists a $q'$ such that $m\longrightarrow{q'}$ and $p'\,R\,q'$.
From $m\longrightarrow{q'}$ and the first condition on $m\,S\,q$, we deduce that there exists a $q''$ such that $q\longrightarrow{q''}$ and $q'\,S\,q''$.
Now we have $q\longrightarrow{q''}$ and $p'\,(S\circ{R})\,q''$, so we have satisfied the first condition for $p\,(S\circ{R})\,q$.
\item
Given any $q'$ with $q\longrightarrow{q'}$, we deduce from the second condition for $m\,S\,q$ that there exists a $p'$ such that $m\longrightarrow{p'}$ and $p'\,S\,q'$.
From $m\longrightarrow{p'}$ and the second condition on $p\,R\,m$, we deduce that there exists a $p''$ such that $p\longrightarrow{p''}$ and $p''\,R\,p'$.
Now we have $p\longrightarrow{p''}$ and $p''\,(S\circ{R})\,q'$, so we have satisfied the second condition for $p\,(S\circ{R})\,q$.
\end{itemize}
\end{enumerate}
We prove $\sim$ is an equivalence relation in three parts.
\begin{description}
\item[Reflexivity]\hfill\\
For all $p\in{P}$ we have $p\,\mathsf{id}_P\,p$, and from \ref{refl} we know $\mathsf{id}_P$ is a bisimulation, therefore $p\sim{p}$.
\item[Symmetry]\hfill\\
Suppose $p\sim{q}$.
Then there exists a bisimulation $R$ such that $p\,R\,q$.
Since $R$ is a bisimulation, we deduce $R^{-1}$ is a bisimulation from \ref{sym}.
From $p\,R\,q$ we deduce $q\,R^{-1}\,p$, and therefore $q\sim{p}$.
\item[Transitivity]\hfill\\
Suppose $p\sim{m}$ and $m\sim{q}$.
Then there are bisimulations $R$ and $S$ such that $p\,R\,m$ and $m\,S\,q$.
From \ref{trans} we infer that $S\circ{R}$ is a bisimulation.
Now since $p\,(S\circ{R})\,q$, we conclude $p\sim{q}$.
\end{description}
Finally, we show $\sim$ is a bisimulation.
Suppose $p\sim{q}$.
Then there exists a bisimulation $R$ such that $p\,R\,q$.
Therefore
\begin{itemize}
\item
$\forall{p'}\in{P}.\;p\longrightarrow{p'}\limp\exists{q'}\in{P}.\;q\longrightarrow{q'}\land{p'}\,R\,q'$, and
\item
$\forall{q'}\in{P}.\;q\longrightarrow{q'}\limp\exists{p'}\in{P}.\;p\longrightarrow{p'}\land{p'}\,R\,q'$.
\end{itemize}
But by the definition of $\sim$, and the fact that $R$ is a bisimulation, it follows that
\begin{itemize}
\item
$\forall{p'}\in{P}.\;p\longrightarrow{p'}\limp\exists{q'}\in{P}.\;q\longrightarrow{q'}\land{p'}\sim{q'}$, and
\item
$\forall{q'}\in{P}.\;q\longrightarrow{q'}\limp\exists{p'}\in{P}.\;p\longrightarrow{p'}\land{p'}\sim{q'}$.
\end{itemize}
Therefore $\sim$ is a bisimulation.
\subsection*{Exercise 3.4}
\begin{description}
\item[Reflexivity]\hfill\\
We have $n\leq{n}$ for all $n\in\mathbb{N}$, since every natural number divides itself.
\item[Transitivity]\hfill\\
Suppose $n_1$ divides $n_2$ and $n_2$ divides $n_3$.
That is, there exist factors $d_1$ and $d_2$ such that $n_2=d_1{n_1}$, and $n_3=d_2{n_2}$.
Then $n_3=d_2(d_1{n_1})=(d_2d_1)n_1$, so $n_1$ divides $n_3$ as required.
\item[Antisymmetry]\hfill\\
Suppose $n$ divides $m$ and $m$ divides $n$.
That is, $n=d_1{m}$ and $m=d_2{n}$ for some natural numbers $d_1$ and $d_2$.
Then $n=d_1{d_2}{n}$, which means $d_1d_2=1$.
In $\mathbb{N}$ it follows that $d_1=d_2=1$, therefore $n=m$ as required.
\item[Lubs and Glbs]\hfill\\
% least upper bound = supremum = vee = lcm
% greatest lower bound = infimum = wedge = gcd
Let $n,m\in\mathbb{N}$.
Observe that, by definition, $\gcd(n,m)$ divides both $n$ and $m$, and is the greatest natural number that does so.
Similarly, $\lcm(n,m)$ is the least natural number divisible by $n$ and $m$.
Therefore $n\vee{m}=\lcm(n,m)$ and $n\wedge{m}=\gcd(n,m)$.
In this partial order, the least upper bound is commonly known as the lowest common multiple, and the greatest lower bound as the greatest common divisor.
\item[In $\mathbb{Z}$]\hfill\\
The ``divides'' relation gives a preorder over the integers, but antisymmetry fails to hold.
In particular, an integer $a$ and its negation $-a$ both divide one another, but are not equal.
\end{description}
\section*{EXERCISES 4}
\subsection*{Exercise 4.4}
We take for granted that every real number is either rational or irrational, and that every rational number is either nonnegative or negative.
Thus $\mathbb{R}=\mathbb{Q}^+\cup\mathbb{Q}^-\cup\mathrm{Irr}$, where $\mathbb{Q}^+$ denotes the nonnegative rationals, $\mathbb{Q}^-$ the negative rationals, and $\mathrm{Irr}$ the set of irrational numbers.
By Theorem 3.37, $\mathbb{R}$ is uncountable.
By Lemma 3.32, if $\mathbb{Q}^+$, $\mathbb{Q}^-$, and $\mathrm{Irr}$ were all countable, then so would be $\mathbb{R}$, therefore at least one of them is uncountable.
Corollary 3.30 says $\mathbb{Q}^+$ is countable.
Lemma 3.28 implies $\mathbb{Q}^-$ is countable as long as there is an injection from $\mathbb{Q}^-$ to $\mathbb{Q}^+$.
The function $f:\mathbb{Q}^-\to\mathbb{Q}^+$ given by $f(q)=-q$ is clearly an injection: if $f(q_1)=f(q_2)$ then $-q_1=-q_2$, hence $q_1=q_2$.
Therefore $\mathbb{Q}^-$ is also countable, forcing the conclusion that $\mathrm{Irr}$ is uncountable.
\subsection*{Exercise 4.6}
\begin{enumerate}[label=(\roman{*}),ref=(\roman{*})]
\item
Let $a\in{C}$ and $a'\notin{C}$.
By definition, $h(a)=f(a)$ and $h(a')=g^{-1}(a')$.
Now suppose $h(a)=h(a')$, so $f(a)=g^{-1}(a')$, and thus $g(f(a))=g(g^{-1}(a'))=a'$.
Since $a\in{C}$, there exists an $n$ such that $a\in{C_n}$.
It follows that $g(f(a))\in{C_{n+1}}$, and thus $g(f(a))\in{C}$.
This is a contradiction, since we also know $g(f(a))=a'\notin{C}$.
Therefore we must have $h(a)\neq{h(a')}$.
We have shown $a\in{C}\land{a'\notin{C}}\lonlyif{h(a)\neq{h(a')}}$.
Taking the contrapositive, we obtain $h(a)=h(a')\lonlyif{a\notin{C}\lor{a'\in{C}}}$.
Instantiating the contrapositive again, we also get $h(a')=h(a)\lonlyif{a'\notin{C}\lor{a\in{C}}}$.
Now if we assume $h(a)=h(a')$, we get both $a'\notin{C}\lor{a\in{C}}$ and $a\notin{C}\lor{a'\in{C}}$.
If $a\in{C}$ we infer $a'\in{C}$ from the second result, and if $a\notin{C}$ we infer $a'\notin{C}$ from the first result.
Thus $a$ and $a'$ are either both in $C$ or both not.
If $a,a'\in{C}$, then $h(a)=f(a)=f(a')=h(a')$ and $a=a'$ follows from the injectivity of $f$.
If $a,a'\notin{C}$, then $h(a)=g^{-1}(a)=g^{-1}(a')=h(a')$ and $a=a'$ follows from the injectivity of $g^{-1}$.
Thus $h(a)=h(a')\lonlyif{a=a'}$, which means $h$ is injective.
\item
We show $(g\circ{f})\,C=C\setminus{C_1}$ by mutual inclusion.
$(g\circ{f})\,C\subseteq{C\setminus{C_1}}$\\
Suppose $x\in(g\circ{f})\,C$, which means there exist $y$ and $n$ such that $x=g(f(y))\in{C_n}$.
We infer $n\neq{1}$, because $C_1$ does not include any element of $g\,{B}\ni{g(f(y))}$.
Therefore $n=m+1$ for some $m\geq{1}$, and $g(f(y))\in{(g\circ{f})\,{C_m}}$.
This means there exists a $y'\in{C_m}$ such that $g(f(y))=g(f(y'))$, but since $g$ and $f$ are both injective, we have $y=y'$, and thus $y\in{C_m}$.
It follows that $g(f(y))=x\in{C_n}$, and thus $x\in{C\setminus{C_1}}$.
${C\setminus{C_1}}\subseteq(g\circ{f})\,C$\\
Suppose $x\in{C\setminus{C_1}}$, so there is an $n\neq{1}$ such that $x\in{C_n}$.
By definition, $x\in(g\circ{f})\,{C_{n-1}}$.
Hence $x\in(g\circ{f})\,{C}$.
Now we can show that $g(b)\in{C}$ implies $b\in{f\,{C}}$.
Suppose $g(b)\in{C}$.
Since $C_1$ does not include $g\,{B}$, it follows that $g(b)$ is also in $C\setminus{C_1}$.
Therefore $g(b)\in(g\circ{f})\,{C}$.
So we must have $g(b)=g(f(x))$ for some $x\in{C}$.
Since $g$ is injective, we have $b=f(x)$, and thus $b\in{f\,{C}}$.
Taking the contrapositive of the above result, we get $b\notin{f\,{C}}\lonlyif{g(b)\notin{C}}$.
Let $b\in{B}$.
We proceed by cases on $b\in{f\,{C}}$.
If $b\in{f\,{C}}$ then there exists an $a\in{C}$ such that $b=f(a)$.
Therefore $h(a)=f(a)=b$.
Now if $b\notin{f\,{C}}$ then, as we have shown, $g(b)\notin{C}$.
Therefore $h(g(b))=g^{-1}(g(b))=b$.
In either case $b$ is in the range of $h$, therefore $h$ is surjective.
\end{enumerate}
\section*{EXERCISES 5}
\subsection*{Exercise 5.1}
Take $h$ to be the function defined by $h(z)=(f(z),g(z))$.
We verify $\pi_1(h(z))=\pi_1(f(z),g(z))=f(z)$ and $\pi_2(h(z))=\pi_2(f(z),g(z))=g(z)$ for all $z\in{Z}$, thus $\pi_1\circ{h}=f$ and $\pi_2\circ{h}=g$.
Now suppose $h':Z\to{X}\times{Y}$ also satisfies these two properties.
Let $(a,b)=h'(z)$ for some arbitrary $z\in{Z}$.
We know $\pi_1\circ{h'}=f$, thus $\pi_1(a,b)=f(z)$.
But $\pi_1(a,b)=a$, so we must have $a=f(z)$.
Similarly, since $\pi_2\circ{h'}=g$ we have $b=\pi_2(a,b)=g(z)$.
Therefore $h'(z)=(f(z),g(z))=h(z)$, so in fact $h'=h$.
We conclude that $h$ is unique.
\subsection*{Exercise 5.5}
\begin{enumerate}[label=(\roman{*}),ref=(\roman{*})]
\item\label{no injection from powerset}
Let $f:\mathcal{P}(X)\to{X}$ and let $W=\{f(Z)\in{X}\mid{Z\subseteq{X}\land{f(Z)\notin{Z}}}\}$.
Clearly $W$ is a subset of $X$ since $f$ returns elements of $X$, so we may consider $f(W)$.
We proceed by cases on $f(W)\in{W}$.
If $f(W)\in{W}$, then there is a $Z$ such that $f(W)=f(Z)$, $Z\subseteq{X}$, and $f(Z)\notin{Z}$.
The last condition ensures $Z\neq{W}$, but that means $f$ is not injective since it sends distinct subsets $W$ and $Z$ to the same element.
If $f(W)\notin{W}$, then there is no $Z$ such that $f(W)=f(Z)$, $Z\subseteq{X}$, and $f(Z)\notin{Z}$.
But this is a contradiction, since $W$ is one such subset.
In one case, $f$ is not injective; in the other, we reach a contradiction.
Therefore, $f$ is not injective.
\item
Let $y$ and $z$ be fixed, distinct elements of $Y$.
Define $k:\mathcal{P}(X)\to(X\to{Y})$ by \[k(s)(x)=\begin{cases}y&x\in{s}\\z&x\notin{s}\end{cases}\]
Now suppose $k(s_1)=k(s_2)$.
If $x\in{s_1}$ then $k(s_1)(x)=y$.
Since $k(s_1)=k(s_2)$, we must also have $k(s_2)(x)=y$.
By the definition of $k$, and because $y\neq{z}$, this means $x\in{s_2}$.
Similarly, if $x\in{s_2}$, then $k(s_2)(x)=y=k(s_1)(x)$ so $x\in{s_1}$ also.
Therefore $s_1=s_2$, and thus $k$ is injective.
\item
Suppose $f:(X\to{Y})\to{X}$ is an injection, and $Y$ has at least two distinct elements.
Then $k:\mathcal{P}(X)\to(X\to{Y})$ as defined above is an injection.
Therefore $f\circ{k}:\mathcal{P}(X)\to{X}$, being the composition of two injections, is an injection.
But in \ref{no injection from powerset} we showed no such injection exists.
We reach a contradiction, and conclude that no such injection $f$ exists.
\end{enumerate}
\section*{EXERCISES 6}
\subsection*{Exercise 6.1}
Let $R$ be the rules defining well-bracketed strings, and $I_R$ the set inductively defined by them.
The principle of rule induction is
\begin{align*}
&P([\ ])\land\null\\
&(\forall{x\in{I_R}}.\;P(x)\lonlyif{P([x])})\land\null\\
&(\forall{x,y\in{I_R}}.\;P(x)\land{P(y)}\lonlyif{P(xy)})\\
&\;\lonlyif{\forall{x\in{I_R}}.\;P(x)}
\end{align*}
Using this principle, we can show that every well-bracketed string has an equal number of left and right brackets by taking $P$ to be the property of having an equal number of left and right brackets, and proving each of the three hypotheses above.
\begin{itemize}
\item
Clearly $[\ ]$ has an equal number, $1$, of each bracket type.
\item
Suppose $x$ has an equal number, $n$, of left and right brackets.
Then $[x]$ has an equal number, $n+1$, of left and right brackets.
\item
Finally, suppose $x$ has an equal number, $n$, and $y$ has an equal number, $m$, of left and right brackets.
Then $xy$ has $n+m$ left brackets, but also $n+m$ right brackets, and thus has an equal number of each bracket type.
\end{itemize}
Therefore, every element of $I_R$, that is, every well-bracketed string, has an equal number of left and right brackets.
\subsection*{Exercise 6.6}
\begin{enumerate}[label=(\roman{*}),ref=(\roman{*})]
\item
Suppose $U\subseteq{V}$, and let $x\in\varphi(U)$.
To show $\varphi$ is monotonic is to show that $x\in\varphi(V)$.
Since $x\in\varphi(U)$, there exists an $n\in{U}$ such that either $x=3n/2$ and $n$ is even, or $x=n$ and $n$ is odd.
But since $U\subseteq{V}$, we also have $n\in{V}$.
Thus we either have $3n/2\in\varphi(V)$ if $n$ is even or $n\in\varphi(V)$ if $n$ is odd.
Therefore $x\in\varphi(V)$.
\item\label{postfixed odd}
Suppose $U$ is a postfixed point of $\varphi$.
Suppose also that $n\in{U}$ and $n$ is even.
Since $U\subseteq{\varphi(U)}$, we also have $n\in\varphi(U)$.
By definition of $\varphi$, there exists an $m\in{U}$ such that either $m$ is even and $n=3m/2$, or $m$ is odd and $n=m$.
But $n$ is even, so we must have $n=3m/2$.
Thus $m=2n/3$ is in $U$.
Again supposing that $U$ is a postfixed point, we now show that $U$ contains only odd numbers.
Let $E_U=\{m\in{U}\mid{m\text{ is even}}\}$ be the subset of even numbers in $U$, and suppose $E_U\neq\emptyset$.
Being a non-empty subset of natural numbers, $E_U$ has a least element.
Let $n\in{E_U}$ be the least element.
Note that $n\in{U}$ since $E_U\subseteq{U}$, and $n$ is even by definition of $E_U$.
We deduce from the previous paragraph that $2n/3\in{U}$.
From the details of that paragraph, it is clear that $n$ is divisible by $3$, and thus $2n/3=2(n/3)$ is even.
Thus $2n/3\in{E_U}$.
But $2n/3<n$, contradicting the choice of $n$ as the least element of $E_U$.
This contradiction shows $E_U=\emptyset$, so $U$ contains only odd numbers.
\item
The relation $\Downarrow$ is defined by the rules
\begin{align*}
\infer[(x>100)]{x\Downarrow(x-10)}{}\\[0.5mm]
\infer[(x\leq{100})]{x\Downarrow{z}}{(x+11)\Downarrow{y}&y\Downarrow{z}}
\end{align*}
We can show that $\Downarrow$ is in fact a partial function by rule induction with the following hypothesis.
\[P(x,y)\liff\forall{y'\in\mathbb{N}_0}.\;x\Downarrow{y'}\limp{y=y'}\]
Assuming $x>100$ we have $P(x,x-10)$, since only one rule applies when $x>100$ so the result must be $x-10$.
Assuming $x\leq{100}$, our instantiated inductive hypotheses are $P(x+11,y)$ and $P(y,z)$, and we must prove $x\Downarrow{z'}\limp{z=z'}$.
Only one rule applies when $x\leq{100}$, so from $x\Downarrow{z'}$ we infer that there exists a $y'$ such that $(x+11)\Downarrow{y'}$ and $y'\Downarrow{z'}$.
Now from $P(x+11,y)$ we get $y=y'$.
Then from $P(y,z)$, we additionally get $z=z'$.
Thus we conclude $P(x,z)$.
We have seen that the right hand side of any pair $x\Downarrow{y}$ in the relation is unique, so we may define a partial function by $f(x)=y\text{ if $x\Downarrow{y}$}$.
The rules for $\Downarrow$ ensure that $f$ satisfies
\[f(x)=\begin{cases}x-10&x>100\\f(f(x+11))&x\leq{100}\end{cases}\]
on each $x\in\mathbb{N}_0$ for which $f$ is defined.
In fact, $\Downarrow$, and hence $f$, is defined on all natural numbers, since being either greater or no greater than $100$ exhausts them.
Now let $P(x)$ be the property
\[P(x) \iff f(x)=\begin{cases}x-10&x>100\\91&x\leq{100}\end{cases}\]
We prove $\forall{x\in\mathbb{N}_0}.\;P(x)$ by well-founded induction on $\prec$.
It suffices to show \[\forall m.\;(\forall{n\prec m}.\;P(n))\limp P(m)\]
Let $m\in\mathbb{N}_0$ and assume $\forall{n\prec m}.\;P(n)$.
Suppose $m>100$.
Then $f(m)=m-10$ by our characterisation of $f$, and hence $P(m)$ holds.
Suppose instead that $m\leq{100}$.
Now to prove $P(m)$ is to prove $f(m)=91$.
Let $n_1=m+11$ and $n_2=f(n_1)$.
By our characterisation of $f$, we have $f(m)=f(n_2)$.
We split into cases again:
\begin{itemize}
\item $m>100$\\
From our characterisation of $f$ we have $f(m)=m-10$, thus $P(m)$ holds.
\item $90