\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}
%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)
% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
% The standard HTML color name is "fuchsia"; the misspelled "fuschia" is
% kept as well for backward compatibility with existing documents.
\definecolor{fuchsia}{rgb}{1.0, 0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}
% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.
% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% Scratch dimension registers: width, depth, and total height (ht + dp).
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
% \itexspace{ht}{dp}{wd}: typeset an invisible strut of the requested size.
% #1 = height in tenths of an ex, #2 = depth in tenths of an ex,
% #3 = width in tenths of an em.  Each raw value is scaled by 0.1 below.
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
% Rule height argument must be height + depth, since the rule is lowered
% by the depth.
\advance\itex@thd\the\itex@dp%
% A box of the computed width containing a zero-width rule (a strut) with
% the computed height and depth.
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother
% \tensor and \multiscript
% Support for tensor-style staggered sub/superscripts.  \mk@scripts scans
% the script list token-pairwise: superscript tokens are accumulated in the
% token register \@sups and flushed (as one ^{...} group) whenever a
% subscript or the terminating "_/" marker is reached, keeping the scripts
% horizontally staggered rather than stacked.
\makeatletter
% \if@sup is true while \@sups holds pending, not-yet-flushed superscripts.
\newif\if@sup
\newtoks\@sups
% Append one token to the pending-superscript register.
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
% Clear the pending-superscript state.
\def\reset@sup{\@supfalse\@sups={}}%
% Recursive scanner; terminates when it sees the "/" end marker.
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
% \tensor{base}{scripts}, e.g. \tensor{R}{^a_b^c_d}.
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
% \multiscripts{prescripts}{base}{postscripts}: scripts on both sides.
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother
% \slash
\makeatletter
% A box holding "/", reused to overstrike arguments (Feynman-slash style).
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
% Typeset #1 with the slash superimposed, centered: back up by half the
% slash width plus half the argument width, then set the argument.
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
% NOTE(review): this silently replaces LaTeX's standard \slash (a
% breakable "/"); presumably intentional for the itex preamble.
\def\slash{\protect\itex@pslash}
\makeatother
% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
% NOTE(review): mathtools (loaded above) already provides \mathllap,
% \mathrlap and \mathclap; these \def's silently replace them with the
% equivalent Perlis versions.
% \clap centers its argument in a zero-width box.
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
% \mathpalette passes the current math style down, so the laps also work
% inside sub/superscripts.
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
% #1 = math style, #2 = content; \mathsurround=0pt suppresses stray spacing.
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}
% Save the primitive \root as \oldroot, then redefine \root and \sqrt so
% that itex's \root{n}{x} and \sqrt[n]{x} both typeset an n-th root of x.
% NOTE(review): an earlier comment here claimed \sqrt is renamed to
% \oldsqrt, but no such \let is performed.
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
% Optional argument defaults to empty, giving a plain square root.
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}
% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
% Register a substitution so a missing txsyc shape degrades gracefully.
\DeclareFontSubstitution{U}{txsyc}{m}{n}
% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}
% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
% Map document font sizes onto MnSymbolE's discrete design sizes.
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
<-6> MnSymbolE5
<6-7> MnSymbolE6
<7-8> MnSymbolE7
<8-9> MnSymbolE8
<9-10> MnSymbolE9
<10-12> MnSymbolE10
<12-> MnSymbolE12}{}
% Declare specific arrows from txfonts without loading the full package
\makeatletter
% Forcibly (re)declare a math symbol: \let it to \undefined first so
% \DeclareMathSymbol does not complain when the command already exists.
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
% Diagonal double arrows (symbolsC slots 116--119); \xxArr are synonyms.
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
% \Perp and \Vbar are synonyms for the same glyph.
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
% Symbols taken from the stmaryrd font.
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
% NOTE(review): {"6} is a single hex digit (slot 6) while sibling stmry
% slots use two digits -- TODO confirm against stmaryrd's glyph table.
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
% Linear-logic "par" (inverted ampersand); \invamp and \parr are synonyms.
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother
% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Declare a math delimiter from the mnomx symbol font.  If the test on #1
% succeeds, #1 is first undefined so an existing command (e.g. the kernel
% \lmoustache/\rmoustache) can be redeclared without error.
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
% Convenience wrappers for opening and closing delimiters.
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
% NOTE(review): the open delimiter uses the higher slot ('245 vs '244);
% verify against MnSymbolE's table if the moustaches render swapped.
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother
% Widecheck
% \widecheck{x}: a wide check accent built by flipping a \widehat of the
% same width upside down (\scalebox{1}[-1]) and overlaying it on the
% argument with \ooalign.
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
% #1 = math style (from \mathpalette), #2 = accented content.
\def\@widecheck#1#2{%
% Box \z@: the bare argument.
\setbox\z@\hbox{\m@th$#1#2$}%
% Box \tw@: a \widehat over an invisible rule skeleton that copies the
% argument's height and width, so the hat is sized to match.
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
% Vertical offset = (ht(arg) + 2*ht(hat)) / 3.
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
% Flip the hat box vertically about the computed offset.
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box
\tw@}}}%
% Overlay the flipped hat (now a check) on the argument.
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother
% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
% Math-mode \raisebox: raises #4 by #1, with optional overridden height
% (#2) and depth (#3).  \mathpalette supplies the current math style so
% the content is typeset correctly in sub/superscripts; \m@th suppresses
% \mathsurround spacing inside the box.
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
% Fixed: this previously said \makeatletter (a typo), leaving the
% @-catcode group unbalanced until a later \makeatother.
\makeatother
% udots (taken from yhmath)
% Upward-slanting diagonal dots (mirror image of \ddots): three periods
% raised by 1pt, 4pt and 7pt respectively, with small kerns between them.
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother
%% Fix array
%% itex's \array (without options) is equivalent to the matrix environment;
%% \array is preprocessed to \itexarray.
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}
%% Renaming existing commands
%% itex/MathML command names mapped onto their standard LaTeX equivalents.
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
%% Arrow aliases.
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
%% Comparison signs as named commands (itex input uses \gt / \lt).
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
%% Capital Greek letters that coincide with Latin capitals.
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
%% Delimiters and big operators.
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
%% Integrals.
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
%% Miscellaneous symbols.
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
%% Size/style switches.
\newcommand{\textsize}{\normalsize}
%% NOTE(review): redefining the text-size command \scriptsize as a math
%% style switch is deliberate in the itex preamble, but it makes
%% \scriptsize unusable in ordinary running text.
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
%% Interactive itex constructs; in print only the content (#2) survives.
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}
% Theorem Environments
% Each environment has a numbered form and an unnumbered, u-prefixed
% starred form (e.g. theorem / utheorem).  Counters are independent
% (no shared numbering between environments).
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}
%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------
\section*{Blog - network theory (part 6)}
This page is a [[blog article in progress]], written by [[John Baez]]. To see the final polished article, \href{http://johncarlosbaez.wordpress.com/2011/04/16/network-theory-part-6/}{go to the Azimuth Blog}.
Now for the fun part. Let's see how quantum theory ideas can be used to understand random processes. I'll try to make this post completely self-contained, except at the very end. So, even if you skipped a bunch of the previous ones, this should make sense.
You also don't need to know quantum theory, though you'll have more fun if you do. What we're doing here is very similar, but also strangely different---for reasons I explained earlier.
Suppose we have a population of rabbits in a cage and we'd like to describe its growth in a stochastic way, using probability theory. Let $\psi_n$ be the probability of having $n$ rabbits. We can borrow a trick from quantum theory, and summarize all these probabilities in a formal power series like this:
\begin{displaymath}
\Psi = \sum_{n = 0}^\infty \psi_n z^n
\end{displaymath}
The variable $z$ doesn't mean anything in particular, and we don't care if the power series converges. See, in math `formal' means ``it's only symbols on the page, just follow the rules''. It's like when someone says a party is `formal', so you need to wear a white tie: you're not supposed to ask what the tie means.
However, there's a good reason for this trick. We can define two operators on formal power series, called the annihilation operator:
\begin{displaymath}
a \Psi = \frac{d}{d z} \Psi
\end{displaymath}
and the creation operator:
\begin{displaymath}
a^\dagger \Psi = z \Psi
\end{displaymath}
They're just differentiation and multiplication by $z$, respectively. So, for example, suppose we start out being 100\% sure we have $n$ rabbits for some particular number $n$. Then $\psi_n = 1$, while all the other probabilities are 0, so:
\begin{displaymath}
\Psi = z^n
\end{displaymath}
If we then apply the creation operator, we obtain
\begin{displaymath}
a^\dagger \Psi = z^{n+1}
\end{displaymath}
Voil\`a{}! One more rabbit!
The annihilation operator is more subtle. If we start out with $n$ rabbits:
\begin{displaymath}
\Psi = z^n
\end{displaymath}
and then apply the annihilation operator, we obtain
\begin{displaymath}
a \Psi = n z^{n-1}
\end{displaymath}
What does this mean? The $z^{n-1}$ means we have one fewer rabbit than before. But what about the factor of $n$? It means there were $n$ ways we could pick a rabbit and make it disappear! This should seem a bit mysterious, for various reasons\ldots{} but we'll see how it works soon enough.
The creation and annihilation operators don't commute:
\begin{displaymath}
(a a^\dagger - a^\dagger a) \Psi = \frac{d}{d z} (z \Psi) - z \frac{d}{d z} \Psi = \Psi
\end{displaymath}
so for short we say:
\begin{displaymath}
a a^\dagger - a^\dagger a = 1
\end{displaymath}
or even shorter:
\begin{displaymath}
[a, a^\dagger] = 1
\end{displaymath}
where the commutator of two operators is $[S,T] = S T - T S$.
The noncommutativity of operators is often claimed to be a special feature of quantum physics, and the commutation relations between annihilation and creation operators are fundamental to understanding the quantum harmonic oscillator. There, instead of rabbits, we're studying quanta of energy, which are peculiarly abstract entities obeying rather counterintuitive laws. So, it's cool that the same math applies to purely classical entities, like rabbits!
Here's how it works. We want to describe how the probabilities $\psi_n$ change with time, so we write
\begin{displaymath}
\Psi(t) = \sum_{n = 0}^\infty \psi_n(t) z^n
\end{displaymath}
Then, we write down an equation describing the rate of change of $\Psi$:
\begin{displaymath}
\frac{d}{d t} \Psi(t) = H \Psi(t)
\end{displaymath}
Here $H$ is an operator called the Hamiltonian, and the equation is called the master equation. The details of the Hamiltonian depend on our problem! But we can often write it down using creation and annihilation operators. Let's do some examples, and then I'll tell you the general rule.
Last time I told you what happens when we stand in a river and catch fish as they randomly swim past. Today we're doing rabbits. This works almost the same way. But there's a fundamental difference: rabbits don't swim.
So, suppose an inexhaustible supply of rabbits are randomly roaming around a huge field, and each time a rabbit enters a certain area, we catch it and add it to our population of caged rabbits. Suppose that on average we catch one rabbit per unit time. Suppose the chance of catching a rabbit during any interval of time is independent of what happens before or afterwards. What is the Hamiltonian describing the probability distribution of caged rabbits, as a function of time?
There's an obvious dumb guess: the creation operator! However, we saw last time that this doesn't work, and we saw how to fix it. The right answer is
\begin{displaymath}
H = a^\dagger - 1
\end{displaymath}
To see why, suppose for example that at some time $t$ we have $n$ rabbits, so:
\begin{displaymath}
\Psi(t) = z^n
\end{displaymath}
Then the master equation says that at this moment,
\begin{displaymath}
\frac{d}{d t} \Psi(t) = (a^\dagger - 1) \Psi(t) = z^{n+1} - z^n
\end{displaymath}
Since $\Psi = \sum_{n = 0}^\infty \psi_n(t) z^n$, this implies that the coefficients of our formal power series are changing like this:
\begin{displaymath}
\frac{d}{d t} \psi_{n+1}(t) = 1
\end{displaymath}
\begin{displaymath}
\frac{d}{d t} \psi_{n}(t) = -1
\end{displaymath}
while all the rest have zero derivative at this moment. And that's exactly right! See, $\psi_{n+1}(t)$ is the probability of having one more rabbit, and this is going up at rate 1. Meanwhile, $\psi_n(t)$ is the probability of having $n$ rabbits, and this is going down at the same rate.
Show that with this Hamiltonian, the master equation predicts that the expected number of rabbits grows linearly.
Don't worry: no rabbits are actually injured in the research that Jacob Biamonte is doing here at the Centre for Quantum Technologies. He's keeping them well cared for in a big room on the 6th floor. This is just a thought experiment.
Suppose a mean nasty guy had a population of rabbits in a cage and didn't feed them at all. Suppose that each rabbit has a unit probability of dying per unit time. And as always, suppose the probability of this happening in any interval of time is independent of what happens before or after that time.
What is the Hamiltonian? Again there's a dumb guess: the annihilation operator! And again this guess is wrong, but it's not far off. As before, the right answer includes a `correction term':
\begin{displaymath}
H = a - N
\end{displaymath}
This time the correction term is famous in its own right. It's called the number operator:
\begin{displaymath}
N = a^\dagger a
\end{displaymath}
The reason is that if we start with $n$ rabbits, and apply this operator, it amounts to multiplication by $n$:
\begin{displaymath}
N z^n = z \frac{d}{d z} z^n = n z^n
\end{displaymath}
Let's see why this guess is right. Again, suppose that at some particular time $t$ we have $n$ rabbits, so
\begin{displaymath}
\Psi(t) = z^n
\end{displaymath}
Then the master equation says that at this time
\begin{displaymath}
\frac{d}{d t} \Psi(t) = (a - N) \Psi(t) = n z^{n-1} - n z^n
\end{displaymath}
So, our probabilities are changing like this:
\begin{displaymath}
\frac{d}{d t} \psi_{n-1}(t) = n
\end{displaymath}
\begin{displaymath}
\frac{d}{d t} \psi_n(t) = -n
\end{displaymath}
while the rest have zero derivative. And this is good! We're starting with $n$ rabbits, and each has a unit probability per unit time of dying. So, the chance of having one less should be going up at rate $n$. And the chance of having the same number we started with should be going at the same rate.
Show that with this Hamiltonian, the master equation predicts that the expected number of rabbits decays exponentially.
Suppose we have a strange breed of rabbits that reproduce asexually. Suppose that each rabbit has a unit probability per unit time of having a baby rabbit, thus effectively duplicating itself.
As you can see from the cryptic picture above, this `duplication' process takes one rabbit as input and has two rabbits as output. So, if you've been paying attention, you should be ready with a dumb guess for the Hamiltonian: $a^\dagger a^\dagger a$. This operator annihilates one rabbit and then creates two!
But you should also suspect that this dumb guess will need a `correction term'. And you're right! As always, the correction term makes the probability of things staying the same go down at exactly the rate that the probability of things changing goes up.
You should guess the correction term\ldots{} but I'll just tell you:
\begin{displaymath}
H = a^\dagger a^\dagger a - N
\end{displaymath}
We can check this in the usual way, by seeing what it does when we have $n$ rabbits:
\begin{displaymath}
H z^n = z^2 \frac{d}{d z} z^n - n z^n = n z^{n+1} - n z^n
\end{displaymath}
That's good: since there are $n$ rabbits, the rate of rabbit duplication is $n$. This is the rate at which the probability of having one more rabbit goes up\ldots{} and also the rate at which the probability of having $n$ rabbits goes down.
Show that with this Hamiltonian, the master equation predicts that the expected number of rabbits grows exponentially.
Let's do some stranger examples, just so you can see the general pattern.
Here each pair of rabbits has a unit probability per unit time of fighting a duel with only one survivor. You might guess the Hamiltonian $a^\dagger a a$, but in fact:
\begin{displaymath}
H = a^\dagger a a - N(N-1)
\end{displaymath}
Let's see why this is right! Let's see what it does when we have $n$ rabbits:
\begin{displaymath}
H z^n = z \frac{d^2}{d z^2} z^n - n(n-1)z^n = n(n-1) z^{n-1} - n(n-1)z^n
\end{displaymath}
That's good: since there are $n(n-1)$ ordered pairs of rabbits, the rate at which duels take place is $n(n-1)$. This is the rate at which the probability of having one less rabbit goes up\ldots{} and also the rate at which the probability of having $n$ rabbits goes down.
(If you prefer pairs of rabbits, just divide the Hamiltonian by 2. We should talk about this more, but not now.)
Now each triple of rabbits has a unit probability per unit time of getting into a fight with only one survivor! I don't know the technical term for a three-way fight, but perhaps it counts as a small `brawl' or `melee'. In fact the Wikipedia article for `melee' shows three rabbits in suits of armor, fighting it out:
Now the Hamiltonian is:
\begin{displaymath}
H = a^\dagger a^3 - N(N-1)(N-2)
\end{displaymath}
You can check that:
\begin{displaymath}
H z^n = n(n-1)(n-2) z^{n-2} - n(n-1)(n-2) z^n
\end{displaymath}
and this is good, because $n(n-1)(n-2)$ is the number of ordered triples of rabbits. You can see how this number shows up from the math, too:
\begin{displaymath}
a^3 z^n = \frac{d^3}{d z^3} z^n = n(n-1)(n-2) z^{n-3}
\end{displaymath}
Suppose we have a process taking $k$ rabbits as input and having $j$ rabbits as output:
I hope you can guess the Hamiltonian I'll use for this:
\begin{displaymath}
H = {a^{\dagger}}^j a^k - N(N-1) \cdots (N-k+1)
\end{displaymath}
This works because
\begin{displaymath}
a^k z^n = \frac{d^k}{d z^k} z^n = n(n-1) \cdots (n-k+1) z^{n-k}
\end{displaymath}
so that if we apply our Hamiltonian to $n$ rabbits, we get
\begin{displaymath}
H z^n = n(n-1) \cdots (n-k+1) (z^{n+j-k} - z^n)
\end{displaymath}
See? As the probability of having $n+j-k$ rabbits goes up, the probability of having $n$ rabbits goes down, at an equal rate. This sort of balance is necessary for $H$ to be a sensible Hamiltonian in this sort of stochastic theory (an infinitesimal stochastic operator, to be precise). And the rate is exactly the number of ordered $k$-tuples taken from a collection of $n$ rabbits. This is called the $k$th falling power of $n$, and written as follows:
\begin{displaymath}
n^{\underline{k}} = n(n-1) \cdots (n-k+1)
\end{displaymath}
Since we can apply functions to operators as well as numbers, we can write our Hamiltonian as:
\begin{displaymath}
H = {a^{\dagger}}^j a^k - N^{\underline{k}}
\end{displaymath}
Let's do one more example just to test our understanding. This time each pair of rabbits has a unit probability per unit time of bumping into one another, exchanging a friendly kiss and walking off. This shouldn't affect the rabbit population at all! But let's follow the rules and see what they say.
According to our rules, the Hamiltonian should be:
\begin{displaymath}
H = {a^{\dagger}}^2 a^2 - N(N-1)
\end{displaymath}
However,
\begin{displaymath}
{a^{\dagger}}^2 a^2 z^n = z^2 \frac{d^2}{dz^2} z^n = n(n-1) z^n = N(N-1) z^n
\end{displaymath}
and since $z^n$ form a `basis' for the formal power series, we see that:
\begin{displaymath}
{a^{\dagger}}^2 a^2 = N(N-1)
\end{displaymath}
so in fact:
\begin{displaymath}
H = 0
\end{displaymath}
That's good: if the Hamiltonian is zero, the master equation will say
\begin{displaymath}
\frac{d}{d t} \Psi(t) = 0
\end{displaymath}
so the population, or more precisely the probability of having any given number of rabbits, will be constant.
There's another nice little lesson here. Copying the calculation we just did, it's easy to see that:
\begin{displaymath}
{a^{\dagger}}^k a^k = N^{\underline{k}}
\end{displaymath}
This is a cute formula for falling powers of the number operator in terms of annihilation and creation operators. It means that for the general transition we saw before:
we can write the Hamiltonian in two equivalent ways:
\begin{displaymath}
H = {a^{\dagger}}^j a^k - N^{\underline{k}} = {a^{\dagger}}^j a^k - {a^{\dagger}}^k a^k
\end{displaymath}
Okay, that's it for now! We can, and will, generalize all this stuff to stochastic Petri nets where there are things of many different kinds---not just rabbits. And we'll see that the master equation we get matches the answer to the puzzle in an earlier post. That's pretty easy. But first, we'll have a guest post by Jacob Biamonte, who will explain an example from population biology.
category: blog
\end{document}