\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}
%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)
% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
% Correct HTML spelling of the color name.
\definecolor{fuchsia}{rgb}{1.0, 0, 1.0}
% Misspelled alias kept for backward compatibility with existing documents.
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}
% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.
% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% Scratch dimensions used by \itexspace.
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
% \itexspace{height}{depth}{width}: an invisible strut of the given size.
% Height and depth are in tenths of an ex, width in tenths of an em (see
% the comment above); the 0.1 scalings below convert those units.
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
% Total rule height = height + depth.
\advance\itex@thd\the\itex@dp%
% A zero-width rule lowered by the depth gives the vertical extent;
% \makebox supplies the horizontal extent.
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother
% \tensor and \multiscript
\makeatletter
% State for script accumulation: \if@sup records whether superscript
% tokens are pending; the token register \@sups collects them.
\newif\if@sup
\newtoks\@sups
% Append #1 to the pending superscript token list.
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
% Clear pending superscripts and reset the flag.
\def\reset@sup{\@supfalse\@sups={}}%
% Scan a script list terminated by the sentinel `_/', emitting
% subscripts as they are found and flushing any accumulated
% superscripts when a `_' or the end of the list is reached.
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
% \tensor{base}{scripts}: base followed by mixed sub/superscripts.
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
% \multiscripts{pre}{base}{post}: scripts both before and after the base.
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother
% \slash
\makeatletter
% Box holding a `/' glyph, set and measured once.
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
% Overlay a slash on the argument: after typesetting the slash, back up
% by half the sum of the two widths so the slash is centered over #1.
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
% NOTE(review): this overrides the LaTeX kernel's \slash (a breakable
% slash); presumably intentional for itex input -- confirm.
\def\slash{\protect\itex@pslash}
\makeatother
% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
% \clap centers its argument in a zero-width box (companion to the
% kernel's \llap and \rlap).
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
% Math-mode wrappers: \mathpalette passes the current math style down to
% the internal macro so the content is set at the correct size.
% NOTE(review): mathtools (loaded above) also defines \mathllap,
% \mathrlap and \mathclap; these \def's silently replace them with
% equivalent implementations.
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
% \mathsurround=0pt avoids spurious space around the re-entered math.
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}
% Save the primitive \root as \oldroot, then redefine \root (and \sqrt)
% so that \root{n}{x} and \sqrt[n]{x} both typeset the n-th root of x
% (itex argument convention).
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}
% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
% Fall back to txsyc medium when a requested shape is unavailable.
\DeclareFontSubstitution{U}{txsyc}{m}{n}
% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}
% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
% Map document font sizes to the optical sizes of the MnSymbolE metrics.
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
<-6> MnSymbolE5
<6-7> MnSymbolE6
<7-8> MnSymbolE7
<8-9> MnSymbolE8
<9-10> MnSymbolE9
<10-12> MnSymbolE10
<12-> MnSymbolE12}{}
% Declare specific arrows from txfonts without loading the full package
\makeatletter
% \DeclareMathSymbol refuses to redefine an existing command with a
% different meaning, so undefine the target first.
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
% Diagonal double arrows from txfonts symbolsC (two aliases per glyph).
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
% Symbols from stmaryrd (slots in decimal or "hex notation).
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
% Linear-logic par (inverted ampersand) under two names.
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother
% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Declare a math delimiter, first undefining #1 when it already has a
% meaning -- presumably so \DeclareMathDelimiter does not complain.
% NOTE(review): the \if\relax\noexpand#1 test's exact intent should be
% confirmed; it guards the \let#1\undefined.
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
% Double angle brackets and moustaches from MnSymbolE (octal slots).
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother
% Widecheck
% \widecheck{x}: a wide check accent built by vertically flipping \widehat.
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
% Box 0: the argument; box 2: a \widehat spanning a phantom of the
% argument's height and width (the two zero-thickness \vrules).
\setbox\z@\hbox{\m@th$#1#2$}%
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
% Flip the hat upside down (\scalebox{1}[-1] from graphicx) about the
% computed axis, then overlay it on the argument with \ooalign.
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box
\tw@}}}%
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother
% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
% \mathraisebox{voffset}[height][depth]{content}: a math-mode \raisebox
% that forwards the surrounding math style to the raised content via
% \mathpalette. The nested \IfNoValueTF's select the \raisebox form
% matching however many optional arguments were given.
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
% Fixed: this line was a second \makeatletter, so the group opened above
% was never closed and @ stayed a letter for subsequent preamble code.
\makeatother
% udots (taken from yhmath)
\makeatletter
% \udots: upward-slanting diagonal dots (the mirror of \ddots), built
% from three periods raised by increasing amounts.
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother
%% Fix array
% itex's \array without options is equivalent to a plain matrix.
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}
%% Renaming existing commands
% itex/MathML-style synonyms for standard LaTeX commands.
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
% Capital Greek letters that are identical to Latin capitals.
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
% Big operators.
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
% Integrals.
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
% Miscellaneous symbols.
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
% Size commands as math-style switches.
\newcommand{\textsize}{\normalsize}
% NOTE(review): replaces the text-mode size command \scriptsize with a
% math-style switch; after this point \scriptsize only makes sense in
% math mode. Presumably intentional for itex input -- confirm.
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
% itex interactivity commands degrade to their visible argument in print.
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}
% Theorem Environments
\theoremstyle{plain}
% Numbered theorem-like environments, each with its own counter.
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
% Unnumbered variants (u-prefixed).
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}
%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------
\section*{Experiments in quantum complex networks}
This page is an [[Experiments|experiment]], written by [[Tomi Johnson]]. For the time being, it should no longer be edited. Instead, ongoing work should take place on the [[Blog - quantum complex networks|blog article in progress]].
\hypertarget{quantum_complex_networks}{}\subsection*{{Quantum complex networks}}\label{quantum_complex_networks}
\emph{Guest post by \href{http://www.azimuthproject.org/azimuth/show/Tomi+Johnson}{Tomi Johnson}}
If you were to randomly click a hyperlink on this web page and keep doing so on each page that followed, where would you end up?
As an esteemed user of Azimuth, I’d like to think you browse more intelligently, but the above is the question Google asks when deciding how to rank the world’s web pages.
Recently, together with the team (Mauro Faccin, \href{http://www.azimuthproject.org/azimuth/show/Jacob+Biamonte}{Jacob Biamonte} and \href{http://migdal.wikidot.com/}{Piotr Migdał}) at the ISI Foundation in Turin, we attended a workshop where several of the attendees were asking a similar question with a twist. ``What if you, the web surfer, behaved quantum mechanically?''
Now don’t panic! I have no reason to think you might enter a superposition of locations or tunnel through a wall. This merely forms part of a recent drive towards understanding the role that network science can play in quantum physics.
As we'll find, to play with quantum networks is fun. It could also become a necessity. The size of natural systems in which quantum effects have been identified has grown steadily over the past few years. For example, attention has recently turned to explaining the remarkable efficiency of \href{http://en.wikipedia.org/wiki/Light-harvesting_complex}{light-harvesting complexes}, comprising tens of molecules and thousands of atoms, using quantum mechanics. If this expansion continues, perhaps quantum physicists will have to embrace the concepts of complex networks.
To begin studying quantum complex networks, we found a revealing toy model. Let me tell you about it. Like all good stories, it starts with a graph: this time representing the internet!
If this taster gets you interested, there are more details available online at:
\begin{itemize}%
\item M. Faccin, T. H. Johnson, J. D. Biamonte, S. Kais, and P. Migdał, \href{http://arxiv.org/abs/1305.6078}{\emph{Degree Distribution in Quantum Walks on Complex Networks}}, arXiv:1305.6078 (2013).
\end{itemize}
We'll be working through Sec II.A and Sec II.B of this paper but no further. We'll end by bounding the difference between a \textbf{stochastic walk} (like the randomly clicking web surfer mentioned above) and a corresponding \textbf{quantum walk}, in terms of the energy of the walker.
\hypertarget{what_does_the_internet_look_like_from_above}{}\subsubsection*{{What does the internet look like from above?}}\label{what_does_the_internet_look_like_from_above}
As we all know, the idea of the internet is to connect computers to each other. What do these connections look like when abstracted as a network, with each computer a node and each connection an edge?
The internet on a local scale, such as in your house or office, might look something like this
with several devices connected to a central hub. Each hub connects to other hubs, and so the internet on a slightly larger scale might look something like this
What about the full global, not local, structure of the internet? To answer this question, researchers have developed representations of the whole internet, such as this one
While such representations are pretty awe inspiring, how can we make any sense of them? (Or are they merely excellent desktop wallpapers and new-age artworks?)
In terms of complex network theory, there's actually a lot that can be said that is not immediately obvious from the above representation.
For example, we find something very interesting if we plot the number of web pages with different incoming links (called \textbf{degree}) on a log-log axis. What is found is the following
This shows that very few computers are connected to a very large number of others, while a very large number of computers have very few connections. More precisely, what this shows is a \textbf{power law distribution}, the signature of which is a straight line on a log-log axis.
In fact, power law distributions arise in a diverse range of real-world networks, both human-built networks such as the internet and naturally occurring networks. They are often discussed alongside the concept of \href{http://en.wikipedia.org/wiki/Preferential_attachment}{preferential attachment}: highly connected nodes seem to accumulate connections more quickly. We all know of a successful blog whose success has led to an increased presence and more success. That's an example of preferential attachment.
It's clear then that degree is an important concept in network theory, and its distribution across the nodes a useful characteristic of a network. Degree gives one indication of how important a node is in a network.
And this is where stochastic walks come in. Google, who are in the business of ranking the importance of nodes (web pages) in a network (the web), use (up to a \href{http://en.wikipedia.org/wiki/Google_matrix}{small modification}) the idealized model of a stochastic walker (web surfer) who randomly hops to connected nodes (follows one of the links on a page). This is called the \textbf{uniform escape model}, since the total rate of leaving any node (web page) is set to be the same for all nodes (pages). Leaving the walker (surfer) to wander (surf) for a long while, Google then takes the probability of the walker (surfer) being on a node (page) to rank the importance of that node (page). In the case that the network is undirected (all links are reciprocated) this long-time probability, and therefore the rank of the node (page), is proportional to the degree of the node (page).
So node degrees and the uniform escape model play an important role in the fields of complex networks and stochastic walks. But can they tell us anything about the much more poorly understood topics of quantum networks and quantum walks? In fact, yes, and demonstrating that to you is the purpose of this post.
Before we move on to the interesting bit, the math, it's worth just listing a few properties of quantum walks that make them hard to analyze, and explaining why they are poorly understood. These are the difficulties we will show how to overcome below.
\begin{itemize}%
\item \emph{No convergence}. In a stochastic walk, if you leave the walker to wander for a long time, eventually the probability of finding a walker at a node converges to a constant value. In a quantum walk, this doesn't happen, so the walk can't be characterized by its long-time properties.
\item \emph{Dependence on initial states}. In some stochastic walks the long-time properties of the walk are independent of the initial state. It is possible to characterize the stochastic walk without referring to the initialization of the walker. No such characterization is possible for quantum walks, whose properties always depend on the initialization of the walker. Is it even possible then to say something useful that applies to all initializations?
\item \emph{Stochastic and quantum generators differ}. Those of you familiar with the \href{http://math.ucr.edu/home/baez/networks/}{network theory series} (for example \href{http://johncarlosbaez.wordpress.com/2011/11/04/network-theory-part-16/}{part 16}) know that some generators produce both stochastic and quantum walks. However, most stochastic walk generators, including that for the uniform escape model, do not generate quantum walks and vice versa. How do we then compare stochastic and quantum walks when their generators differ?
\end{itemize}
With the task outlined, let's get started!
\hypertarget{graphs_and_walks}{}\subsubsection*{{Graphs and walks}}\label{graphs_and_walks}
In the next couple of sections I'm going to explain the diagram below to you. If you’ve been following the \href{http://math.ucr.edu/home/baez/networks/}{network theory series}, in particular \href{http://math.ucr.edu/home/baez/networks/networks_20.html}{part 20}, you’ll find parts of it familiar. But as it's been a while since the last post covering this topic, let's start with the basics.
A \href{http://en.wikipedia.org/wiki/Simple_graph#Simple_graph}{\textbf{simple graph}} $G$ can be used to define both stochastic and quantum walks. A simple graph is something like this
where there is at most one edge between any two nodes, there are no edges from a node to itself and all edges are \textbf{undirected}. To avoid complications, let’s stick to simple graphs with a \textbf{finite} number $n$ of nodes. Let’s also assume you can get from every node to every other node via some combination of edges i.e. the graph is \textbf{connected}.
In the particular example above the graph represents a network of $n = 5$ nodes, where nodes 3 and 4 have degree (number of edges) 3, and nodes 1, 2 and 5 have degree 2.
Every simple graph defines a matrix $A$, called the \textbf{adjacency matrix}. For a network with $n$ nodes, this matrix is of size $n \times n$, and each element $A_{i j}$ is unity if there is an edge between nodes $i$ and $j$, and zero otherwise (let's use this basis for the rest of this post). For the graph drawn above the adjacency matrix is
\begin{displaymath}
\left( \begin{matrix}
0 & 1 & 0 & 1 & 0 \\
1 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 1 & 1 \\
1 & 0 & 1 & 0 & 1 \\
0 & 0 & 1 & 1 & 0
\end{matrix} \right)
\end{displaymath}
By construction, every adjacency matrix is \textbf{symmetric} $A =A^T$ (the $T$ means the transposition of the elements in the node basis) and further, because each $A$ is real, it is \textbf{self-adjoint} $A=A^\dagger$ (the $\dagger$ means conjugate transpose).
This is nice, since (as seen in parts \href{http://math.ucr.edu/home/baez/networks/networks_16.html}{16} and \href{http://math.ucr.edu/home/baez/networks/networks_20.html}{20}) a self-adjoint matrix generates a continuous-time \textbf{quantum walk}.
To recap from the \href{http://math.ucr.edu/home/baez/networks/}{series}, a quantum walk is an evolution arising from a quantum walker moving on a network.
A \textbf{state} of a quantum walk is represented by a size $n$ complex column vector $\psi$. Each element $\langle i , \psi \rangle$ of this vector is the so-called \textbf{amplitude} associated with node $i$ and the \textbf{probability} of the walker being found on that node (if measured) is the modulus of the amplitude squared $|\langle i , \psi \rangle|^2$. Here $i$ is the standard basis vector with a single non-zero $i$-th entry equal to unity, and $\langle u , v \rangle = u^\dagger v$ is the usual inner vector product.
A quantum walk evolves in time according to the \textbf{Schrödinger equation}
\begin{displaymath}
\frac{d}{d t} \psi(t)= - i H \psi(t)
\end{displaymath}
where $H$ is called the \textbf{Hamiltonian}. If the initial state is $\psi(0)$ then the solution is written as
\begin{displaymath}
\psi(t) = \exp(- i t H) \psi(0)
\end{displaymath}
The probabilities $\{ | \langle i , \psi (t) \rangle |^2 \}_i$ are guaranteed to be correctly normalized when the Hamiltonian $H$ is self-adjoint.
There are other matrices that are defined by the graph. Perhaps the most familiar is the \textbf{Laplacian}, which has recently been a topic on this blog (see parts \href{http://math.ucr.edu/home/baez/networks/networks_15.html}{15}, \href{http://math.ucr.edu/home/baez/networks/networks_16.html}{16} and \href{http://math.ucr.edu/home/baez/networks/networks_20.html}{20} of the \href{http://math.ucr.edu/home/baez/networks/}{series}, and this \href{http://johncarlosbaez.wordpress.com/2013/05/19/graph-laplacians/}{recent post}).
The Laplacian $L$ is the $n \times n$ matrix $L = D - A$, where the \textbf{degree matrix} $D$ is an $n \times n$ diagonal matrix with elements given by the degrees
\begin{displaymath}
D_{i i}=\sum_{j} A_{i j}
\end{displaymath}
For the graph drawn above the degree matrix and Laplacian are
\begin{displaymath}
\left( \begin{matrix}
2 & 0 & 0 & 0 & 0 \\
0 & 2 & 0 & 0 & 0 \\
0 & 0 & 3 & 0 & 0 \\
0 & 0 & 0 & 3 & 0 \\
0 & 0 & 0 & 0 & 2
\end{matrix} \right)
\qquad \mathrm{and} \qquad
\left( \begin{matrix}
2 & -1 & 0 & -1 & 0 \\
-1 & 2 & -1 & 0 & 0 \\
0 & -1 & 3 & -1 & -1 \\
-1 & 0 & -1 & 3 & -1 \\
0 & 0 & -1 & -1 & 2
\end{matrix} \right)
\end{displaymath}
The Laplacian is self-adjoint and generates a quantum walk.
The Laplacian has another property; it is \textbf{infinitesimal stochastic}. This means that its off diagonal elements are non-positive and its columns sum to zero. This is interesting because an infinitesimal stochastic matrix generates a continuous-time \textbf{stochastic walk}.
To recap from the \href{http://math.ucr.edu/home/baez/networks/}{series}, a stochastic walk is an evolution arising from a stochastic walker moving on a network.
A \textbf{state} of a stochastic walk is represented by a size $n$ non-negative column vector $\psi$. Each element $\langle i , \psi \rangle$ of this vector is the \textbf{probability} of the walker being found on node $i$.
A stochastic walk evolves in time according to the \textbf{master equation}
\begin{displaymath}
\frac{d}{d t} \psi(t)= - H \psi(t)
\end{displaymath}
where $H$ is called the stochastic \textbf{Hamiltonian}. If the initial state is $\psi(0)$ then the solution is written
\begin{displaymath}
\psi(t) = \exp(- t H) \psi(0)
\end{displaymath}
The probabilities $\{ \langle i , \psi (t) \rangle \}_i$ are guaranteed to be non-negative and correctly normalized when the stochastic Hamiltonian $H$ is infinitesimal stochastic.
So far, I have just presented what has been covered on Azimuth previously. However, to analyze the important uniform escape model we need to go beyond the class of (\href{http://math.ucr.edu/home/baez/networks/networks_16.html}{Dirichlet}) generators that produce both quantum and stochastic walks. Further, we have to somehow find a related quantum walk. We'll see below that both tasks are achieved by considering the normalized Laplacians: one generating the uniform escape stochastic walk and the other a related quantum walk.
\hypertarget{normalized_laplacians}{}\subsubsection*{{Normalized Laplacians}}\label{normalized_laplacians}
The two normalized Laplacians are
\begin{itemize}%
\item the \textbf{asymmetric normalized Laplacian} $S = L D^{-1}$ (that generates the uniform escape $S$tochastic walk) and
\item the \textbf{symmetric normalized Laplacian} $Q = D^{-1/2} L D^{-1/2}$ (that generates a $Q$uantum walk).
\end{itemize}
For the graph drawn above the asymmetric normalized Laplacian $S$ is
\begin{displaymath}
\left( \begin{matrix}
1 & -1/2 & 0 & -1/3 & 0 \\
-1/2 & 1 & -1/3 & 0 & 0 \\
0 & -1/2 & 1 & -1/3 & -1/2 \\
-1/2 & 0 & -1/3 & 1 & -1/2 \\
0 & 0 & -1/3 & -1/3 & 1
\end{matrix} \right)
\end{displaymath}
The identical diagonal elements indicate that the total rates of leaving each node are identical, and the equality within each column of the other non-zero elements indicates that the walker is equally likely to hop to any node connected to its current node. This is the uniform escape model!
For the same graph the symmetric normalized Laplacian $Q$ is
\begin{displaymath}
\left( \begin{matrix}
1 & -1/2 & 0 & -1/\sqrt{6} & 0 \\
-1/2 & 1 & -1/\sqrt{6} & 0 & 0 \\
0 & -1/\sqrt{6} & 1 & -1/3 & -1/\sqrt{6} \\
-1/\sqrt{6} & 0 & -1/3 & 1 & -1/\sqrt{6} \\
0 & 0 & -1/\sqrt{6} & -1/\sqrt{6} & 1
\end{matrix} \right)
\end{displaymath}
That the diagonal elements are identical in the quantum case indicates that all nodes are of equal energy; this is the type of quantum walk usually considered.

\textbf{Puzzle 1.} Show that in general $S$ is infinitesimal stochastic but not self-adjoint.

\textbf{Puzzle 2.} Show that in general $Q$ is self-adjoint but not infinitesimal stochastic.
So a graph defines two matrices: one $S$ that generates a stochastic walk, and one $Q$ that generates a quantum walk. The natural question to ask is whether these walks are related. The answer is that they are!
Underpinning this relationship is the mathematical property that $S$ and $Q$ are \href{http://en.wikipedia.org/wiki/Matrix_similarity}{similar}. They are related by the following similarity transformation
\begin{displaymath}
S = D^{1/2} Q D^{-1/2},
\end{displaymath}
which means that any eigenvector $\phi_k^i$ of $Q$ associated to eigenvalue $\epsilon_k$ implies that $\pi_k^i \propto D^{1/2} \phi_k^i$ is an eigenvector of $S$ associated to the same eigenvalue. To show this, insert identity $I = D^{-1/2} D^{1/2}$ into
\begin{displaymath}
Q \phi_k^i = \epsilon_k \phi_k^i
\end{displaymath}
and multiply from the left with $D^{1/2}$ to obtain
\begin{displaymath}
\begin{aligned}
(D^{1/2} Q D^{-1/2} ) (D^{1/2} \phi_k^i ) &= \epsilon_k ( D^{1/2} \phi_k^i ) \\ S \pi_k^i &= \epsilon_k \pi_k^i \end{aligned}
\end{displaymath}
The same works in the opposite direction. Any eigenvector $\pi_k^i$ of $S$ implies an eigenvector $\phi_k^i \propto D^{-1/2} \pi_k^i$ of $Q$ associated to the same eigenvalue $\epsilon_k$.
The mathematics is also particularly nice because $Q$ is self-adjoint. A self-adjoint matrix is \href{http://en.wikipedia.org/wiki/Diagonalizable_matrix}{diagonalizable}, and has real eigenvalues and orthogonal eigenvectors.
As a result, the symmetric normalized Laplacian can be decomposed as
\begin{displaymath}
Q = \sum_k \epsilon_k \Phi_k
\end{displaymath}
where $\epsilon_k$ is real and $\Phi_k$ are orthogonal \href{http://en.wikipedia.org/wiki/Projection_%28linear_algebra%29#Properties_and_classification}{projectors}. Each $\Phi_k$ acts as identity only on vectors in the space spanned by $\{ \phi_k^i \}_i$ and as zero on all others, such that $\Phi_k \Phi_l = \delta_{k l} \Phi_k$.
Multiplying from the left by $D^{1/2}$ and the right by $D^{-1/2}$ results in a similar decomposition for $S$
\begin{displaymath}
S = \sum_k \epsilon_k \Pi_k
\end{displaymath}
with orthogonal projectors $\Pi_k = D^{1/2} \Phi_k D^{-1/2}$.
We now have all the ingredients necessary to study the walks generated by the normalized Laplacians and discover the relationship between them. I promised above that I would explain the following diagram
Let's summarize what it represents now:
\begin{itemize}%
\item $G$ is a simple graph that specifies
\item $A$ the adjacency matrix (generator of a quantum walk), which subtracted from
\item $D$ the diagonal matrix of the degrees gives
\item $L$ the symmetric Laplacian (generator of stochastic and quantum walks), which when normalized by $D$ returns both
\item $S$ the generator of the uniform escape stochastic walk and
\item $Q$ the quantum walk generator to which it is similar!
\end{itemize}
If this has only warmed you up, next I’ll talk you through the mathematics of the uniform escape stochastic walk $S$ and how it connects to the degrees of the nodes in the long-time limit. Then I’ll show you how this helps us solve aspects of the quantum walk generated by $Q$.
\hypertarget{stochastic_walk}{}\subsubsection*{{Stochastic walk}}\label{stochastic_walk}
The uniform escape stochastic walk generated by $S$ is popular because it has a \emph{really} useful \textbf{stationary state}.
To recap from \href{http://math.ucr.edu/home/baez/networks/networks_20.html}{part 20} in the \href{http://math.ucr.edu/home/baez/networks/}{series}, a stationary state of a stochastic walk is one that does not change in time. From the master equation $\frac{d}{d t} \psi(t) = -S \psi(t)$ the stationary state must be an eigenvector $\pi_0^i$ of $S$ with eigenvalue $\epsilon_0 = 0$.
A fantastic pair of theorems hold:
\begin{itemize}%
\item There is always a unique (up to multiplication by a positive number) positive eigenvector $\pi_0$ (abbreviated as $\pi$) of $S$ with eigenvalue $\epsilon_0 = 0$, i.e., a \textbf{unique stationary state} $\pi$.
\item Regardless of the initial state $\psi(0)$, the stationary state $\pi$ is obtained in the long-time limit $\lim_{t \rightarrow \infty} \psi(t) = \pi$.
\end{itemize}
To find this unique stationary state, consider the Laplacian $L$, which is both infinitesimal stochastic and symmetric. Among other things, this means the rows of $L$ sum to zero
\begin{displaymath}
\sum_j L_{i j} = 0
\end{displaymath}
which means the all ones vector $\mathbf{1}$ is an eigenvector of $L$ with zero eigenvalue
\begin{displaymath}
L \mathbf{1} = 0
\end{displaymath}
Inserting the identity $I = D^{-1} D$ into this equation we then find $D \mathbf{1}$ is a zero eigenvector of $S$
\begin{displaymath}
L \mathbf{1} = ( L D^{-1} ) ( D \mathbf{1} ) = S ( D \mathbf{1} ) = 0
\end{displaymath}
Therefore we just need to normalize this to get the long-time stationary state of the walk
\begin{displaymath}
\pi = \frac{D \mathbf{1}}{\sum_i D_{i i}}
\end{displaymath}
Each element $\langle i , \pi \rangle$ of this state, the long-time probability of finding a walker at node $i$, is proportional to the degree $D_{i i}$ of node $i$!
We can check this holds for the graph drawn above, where $\pi$ is
\begin{displaymath}
\left( \begin{matrix}
1/6 \\
1/6 \\
1/4 \\
1/4 \\
1/6
\end{matrix} \right)
\end{displaymath}
which implies long-time probability $1/6$ for nodes $1$, $2$ and $5$, and $1/4$ for nodes $3$ and $4$. Comparing this to the original graph
this exactly reflects the arrangement of degrees, as we knew it must. Math works!
\hypertarget{quantum_walk}{}\subsubsection*{{Quantum walk}}\label{quantum_walk}
Next up is the quantum walk generated by $Q$. Not a lot is known about quantum walks on networks of arbitrary geometry, but below we’ll see some analytical results are obtained by exploiting the similarity of $S$ and $Q$.
Where to start? Well, let's start from the bottom, what quantum physicists call the \textbf{ground state}. In contrast to stochastic walks, for a quantum walk every eigenvector $\phi_k^i$ of $Q$ is a \textbf{stationary state} of the quantum walk (in puzzle 7, at the bottom of this page, I ask you to prove this). The stationary state $\phi_0$ is of particular interest physically and mathematically. Physically, since eigenvectors of $Q$ correspond to states of well-defined energy equal to the associated eigenvalue, $\phi_0$ is the state of lowest energy $\epsilon_0 = 0$, hence the name ground state (in puzzle 5 we ask you to prove that all eigenvalues of $Q$ are non-negative, so zero really does correspond to the ground state).
Mathematically, the relationship between eigenvectors implied by the similarity of $S$ and $Q$ means $\phi_0 \propto D^{-1/2} \pi \propto D^{1/2} \mathbf{1}$. So, if in the ground state, the probability of being measured to be at node $i$ is $| \langle i , \phi_0 \rangle |^2 \propto | \langle i , D^{1/2} \mathbf{1} \rangle |^2 = D_{i i}$. Amazingly, this probability is proportional to the degree and so is exactly the same as $\langle i , \pi \rangle$, the probability in the stationary state $\pi$ of the stochastic walk.
A zero energy quantum walk $Q$ leads to exactly the same distribution of the walker over the nodes as in the long-time limit of the uniform escape stochastic walk $S$.
The normally classical notion of degree distribution plays a role in quantum walks!
This is already pretty exciting. But what else can we say? If you are someone who feels faint at the sight of quantum mechanics, well done for getting this far, but be prepared for what's coming next.
What if the walker starts in some other initial state? Is there some quantum walk analogue of the unique long-time state of a stochastic walk?
In fact, the quantum walk in general does not converge to a stationary state. But there is a probability distribution that can be thought to characterize the quantum walk in the same way as the long-time state characterizes the stochastic walk. It's the \textbf{long-time average probability vector} $P$.
If you didn't know the time that had passed since the beginning of a quantum walk, then the best estimate for the probability of your measuring the walker to be at node $i$ would be the long-time average probability
\begin{displaymath}
\langle i , P \rangle = \lim_{T \rightarrow \infty} \frac{1}{T} \int_0^T | \psi_i (t) |^2 d t
\end{displaymath}
There’s a bit that we can do to simplify this expression. As always, let's start with the trick of inserting the decomposition $Q= \sum_k \epsilon_k \Phi_k$ into $\psi(t) = e^{-i Q t} \psi(0)$ to get $\psi(t) = \sum_k e^{-i \epsilon_k t} \Phi_k \psi(0)$ and thus
\begin{displaymath}
\langle i , P \rangle = \lim_{T \rightarrow \infty} \frac{1}{T} \int_0^T | \sum_k e^{-i \epsilon_k t} \langle i, \Phi_k \psi (0) \rangle |^2 d t
\end{displaymath}
Due to the integral over all time the interferences between terms corresponding to different eigenvalues average to zero, leaving
\begin{displaymath}
\langle i , P \rangle = \sum_k | \langle i, \Phi_k \psi(0) \rangle |^2
\end{displaymath}
The long-time average probability is then the sum of terms contributed by the projections of the initial state onto each eigenspace.
So we have a distribution that characterizes a quantum walk for a general initial state but it's a complicated beast. What can we say about it?
Our best hope of understanding the long-time average probability is through the term $| \langle i, \Phi_0 \psi (0) \rangle |^2$ associated with the zero energy eigenspace, since we know everything about this space.
For example, we know the zero energy eigenspace is one-dimensional and spanned by the eigenvector $\phi_0$. This means that the projector is just the usual outer product
\begin{displaymath}
\Phi_0 = | \phi_0 \rangle \langle \phi_0 | = \phi_0 \phi_0^\dagger
\end{displaymath}
where we have normalized $\phi_0$ according to the inner product $\langle \phi_0, \phi_0\rangle = 1$. (If you're wondering why I'm using all these angle brackets, well, it's a type of notation named after \href{http://en.wikipedia.org/wiki/Bra%E2%80%93ket_notation}{Dirac} that is adored by quantum physicists.)
The zero eigenspace contribution to the long-time average probability then breaks nicely into two
\begin{displaymath}
| \langle i, \Phi_0 \psi (0) \rangle |^2 = | \langle i, \phi_0\rangle \langle \phi_0, \psi (0) \rangle |^2 = | \langle i, \phi_0\rangle |^2 | \langle \phi_0 , \psi (0) \rangle |^2 = \langle i , \pi \rangle | \langle \phi_0 , \psi (0) \rangle |^2
\end{displaymath}
This is just the product of two probabilities!
First the probability $\langle i , \pi \rangle$ for a quantum state in the zero energy eigenspace to be at node $i$ (as we found above) and second the probability $| \langle \phi_0, \psi (0)\rangle |^2$ of being in this eigenspace to begin with (in quantum mechanics the probability of measuring the system to have an energy is the modulus squared of the projection of the state onto the associated eigenspace, which for the one dimensional zero energy eigenspace means just the inner product with the ground state.)
This is all we need to say something interesting about the long-time average probability for all states. We've basically shown that we can break the long-time probability vector $P$ into a sum of two normalized probability vectors
\begin{displaymath}
P = (1- \eta) \pi + \eta \Omega
\end{displaymath}
the first $\pi$ being the degree dependent stochastic stationary state associated with the zero energy eigenspace and the second associated with the higher energy eigenspaces $\Omega$, with
\begin{displaymath}
\langle i , \Omega \rangle = \frac{ \sum_{k\neq 0} | \langle i, \Phi_k \psi (0) \rangle |^2 }{ \eta}
\end{displaymath}
The weight of each term is governed by the parameter
\begin{displaymath}
\eta = 1 - | \langle \phi_0, \psi (0)\rangle |^2
\end{displaymath}
which you could think of as the \textbf{quantumness} of the result. This is unity minus the probability of the walker being in the zero energy eigenspace, or equivalently the probability of the walker being outside the zero energy eigenspace.
\begin{description}
\item[So even though we don't know anything about $\Omega$ we know its importance is controlled by a parameter $\eta$ that governs how close the long-time average distribution $P$ of the quantum walk is to the corresponding stochastic stationary distribution $\pi$. What do we mean by close? Find out for yourself:] \textbf{Puzzle 3.} Show, using a triangle inequality, that the \href{http://en.wikipedia.org/wiki/Trace_distance}{trace distance} between the two characteristic stochastic and quantum distributions $\{ \langle i , P \rangle \}_i$ and $\{ \langle i , \pi \rangle \}_i$ is upper-bounded by $2 \eta$.
\end{description}
Can we say anything physical about when the quantumness $\eta$ is big or small?
Because the eigenvalues of $Q$ have a physical interpretation in terms of energy, the answer is yes. The quantumness $\eta$ is the probability of being outside the zero energy state. Call the next lowest eigenvalue $\Delta = \min_{k \neq 0} \epsilon_k$ the energy gap. If the quantum walk is not in the zero energy eigenspace then it must be in an eigenspace of energy greater than or equal to $\Delta$. Therefore the expected energy $E$ of the quantum walker must bound the quantumness $E \ge \eta \Delta$.
This tells us that a quantum walk with a low energy is similar to a stochastic walk in the long-time limit (we already knew this exactly in the zero energy limit).
So little is known about quantum walks on networks of arbitrary geometry, that we were very pleased to find this result. It says there is a special case in which the walk is characterized by the degree distribution of the network and gives us a clear physical parameter that bounds how far the walk is from this special case.
Also, in finding it we learnt that the difficulties of the initial state dependence, enhanced by the lack of convergence to a stationary state, could be overcome for a quantum walk, and that the relationships between quantum and stochastic walks extend beyond those with shared generators.
\hypertarget{what_next}{}\subsubsection*{{What next?}}\label{what_next}
That’s all for the latest bit of idea sharing at the interface between stochastic and quantum systems.
I hope I’ve piqued your interest about quantum walks. There’s so much still left to work out about this topic, and your help is needed!
Other questions we have include: What holds analytically about the form of the quantum correction? Numerically it is known that the so-called quantum correction $\Omega$ tends to enhance the probability of being found on nodes of low degree compared to $\pi$, can someone explain why? What happens if a small amount of stochastic noise is added to a quantum walk? Or a lot of noise?
It’s difficult to know who is best placed to answer these questions: experts in quantum physics, graph theory, complex networks or stochastic processes? I suspect it’ll take a bit of help from everyone.
\hypertarget{in_other_news}{}\subsubsection*{{In other news}}\label{in_other_news}
To close, the ISI team recently attended (in fact helped organize) a workshop at IQC on the topic of quantum complex networks. Needless to say, there were talks on papers related to quantum mechanics and networks!
Some researchers at the workshop gave exciting talks based on numerical examinations of what happens if a quantum walk is used instead of a stochastic walk to rank the nodes of a network:
\begin{itemize}%
\item G. D. Paparo and M. A. Martin-Delgado, \href{http://www.nature.com/srep/2012/120608/srep00444/full/srep00444.html?}{\emph{Google in a Quantum Network}}, Sci. Rep. \textbf{2}, 444 (2012).
\item E. Sánchez-Burillo, J. Duch, J. Gómez-Gardenes, and D. Zueco, \href{http://www.nature.com/srep/2012/120828/srep00605/full/srep00605.html}{\emph{Quantum Navigation and Ranking in Complex Networks}}, Sci. Rep. \textbf{2}, 605 (2012).
\end{itemize}
Others attending the workshop have numerically examined what happens when using quantum computers to represent the stationary state of a stochastic process
\begin{itemize}%
\item S. Garnerone, P. Zanardi, and D. A. Lidar, \href{http://prl.aps.org/abstract/PRL/v108/i23/e230506}{\emph{Adiabatic quantum algorithm for search engine ranking}}, Phys. Rev. Lett. \textbf{108}, 230506 (2012); \href{http://arxiv.org/abs/1109.6546}{arXiv: 1109.6546}.
\end{itemize}
It was a fun workshop and we plan to organize/attend more in the future.
\hypertarget{background_reading}{}\subsubsection*{{Background reading}}\label{background_reading}
A couple of text books with comprehensive sections on non-negative matrices and continuous-time stochastic processes are:
\begin{itemize}%
\item P. Lancaster and M. Tismenetsky, \href{http://store.elsevier.com/The-Theory-of-Matrices/Peter-Lancaster/isbn-9780124355606/}{\emph{The theory of matrices: with applications}}, 2nd Ed. (Academic Press, San Diego, 1985).
\item J. R. Norris, \href{http://dx.doi.org/10.1017%2FCBO9780511810633}{\emph{Markov Chains}} (Cambridge University Press, Cambridge, 1997).
\end{itemize}
There is, of course, the book that arose from the Azimuth \href{http://math.ucr.edu/home/baez/networks/}{network theory series}, which considers several relationships between quantum and stochastic processes on networks:
\begin{itemize}%
\item J. Baez and J. Biamonte, \href{http://math.ucr.edu/home/baez/stoch_stable.pdf}{\emph{A Course on Quantum Techniques for Stochastic Mechanics}}, arXiv:1209.3632 (2012).
\end{itemize}
Another couple on complex networks are:
\begin{itemize}%
\item M. Newman, \href{http://oup.com/us/catalog/general/subject/Physics/?view=usa&ci=9780199206650}{\emph{Networks: An Introduction}} (Oxford University Press, Oxford, 2010).
\item E. Estrada, \href{http://ukcatalogue.oup.com/product/academic/physics/9780199591756.do?}{\emph{The Structure of Complex Networks: Theory and Applications}}, (Oxford University Press, Oxford, 2011). Note that the first chapter is free online.
\end{itemize}
\hypertarget{puzzles_for_the_enthusiastic}{}\subsubsection*{{Puzzles for the enthusiastic}}\label{puzzles_for_the_enthusiastic}
\hypertarget{stochastic_walks_and_stationary_states}{}\paragraph*{{Stochastic walks and stationary states}}\label{stochastic_walks_and_stationary_states}
\begin{description}
\item[Sadly I didn't have space to show proofs of all the theorems I used. So here are a few puzzles that guide you to doing the proofs for yourself:] \textbf{Puzzle 4.} (For the hard core) Prove there is always a unique positive eigenvector for a stochastic walk generated by $S$. You’ll need the assumption that the graph $G$ is connected. It's not simple, and you’ll probably need help from a book, perhaps one of those above by Lancaster and Tismenetsky, and Norris.
\textbf{Puzzle 5.} Show that the eigenvalues of $S$ (and therefore $Q$) are non-negative i.e. $\epsilon_k \ge 0$. A good way to start this proof is to apply the \href{http://en.wikipedia.org/wiki/Perron%E2%80%93Frobenius_theorem}{Perron--Frobenius theorem} to the non-negative matrix $M = - S + I \max_i S_{i i}$. This means $M$ has a positive eigenvalue $r$ equal to its spectral radius ($r = \max_k | \lambda_k |$ where $\{ \lambda_k \}_k$ are the eigenvalues of $M$), and the associated eigenvector $v$ is positive. Since $S = - M + I \max_i S_{i i}$, it follows that $S$ shares the eigenvectors of $M$ and the associated eigenvalues are related by inverted translation $\epsilon_k = - \lambda_k + \max_i S_{i i}$.
\textbf{Puzzle 6.} Prove that regardless of the initial state $\psi(0)$, the zero eigenvector $\pi$ is obtained in the long-time limit $\lim_{t \rightarrow \infty} \psi(t) = \pi$ of the walk generated by $S$. This breaks down into two parts:
\textbf{(a)} Using the approach from puzzle 5, show that $S v = \epsilon_v v$, the positivity of $v$ and the infinitesimal stochastic property $\sum_i S_{i j} = 0$ imply that $\epsilon_v = \epsilon_0 = 0$ and thus $v = \pi$ is actually the unique zero eigenvector and stationary state of $S$ (its uniqueness follows from puzzle 4, you don’t need to re-prove it).
\textbf{(b)} By inserting the decomposition $S = \sum_k \epsilon_k \Pi_k$ into $e^{-S t}$ and using the result of puzzle 5, complete the proof.
\end{description}
(Though I ask you to use the diagonalizability of $S$, the main results still hold if the generator is irreducible but not diagonalizable.)
\hypertarget{quantum_walks}{}\paragraph*{{Quantum walks}}\label{quantum_walks}
\begin{description}
\item[Here are a couple of extra puzzles for those of you interested in quantum mechanics:] \textbf{Puzzle 7.} In quantum mechanics, probabilities are given by the moduli squared of amplitudes, so multiplying a state by a number of modulus unity has no physical effect. By inserting $Q= \sum_k \epsilon_k \Phi_k$ into the quantum evolution matrix $e^{-i Q t}$ show that if $\psi(0) = \phi_i^k$ then $\psi(t) = e^{ - i \epsilon_k t} \psi(0)$, hence $\phi_i^k$ is a stationary state as probabilities don't change in time.
\textbf{Puzzle 8.} By expanding the initial state $\psi(0)$ in terms of the complete orthogonal basis vectors $\{ \phi_i^k \}$, show that for a quantum walk $\psi(t)$ never converges to a stationary state unless it began in one.
\end{description}
\end{document}