\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}
%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)
% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
% NOTE(review): HTML names this color "fuchsia"; the "fuschia" spelling is
% presumably kept to match the itex input it must accept -- confirm before renaming.
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
% 0.502 and 0.753 are 128/255 and 192/255, the HTML color component values.
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}
% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.
% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% Scratch dimension registers for \itexspace: width, depth, total rule height.
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
% \itexspace{ht}{dp}{wd}: typeset an invisible box of the given size.
% Per the note above, height/depth are in tenths of an ex and width in tenths
% of an em (hence the 0.1 rescalings below).
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd% width argument is tenths of an em
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp% depth argument is tenths of an ex
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd% height argument is tenths of an ex
\advance\itex@thd\the\itex@dp% total rule extent = height + depth
% An invisible rule (strut) of the requested height/depth, inside a box of
% the requested width.
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother
% \tensor and \multiscript
% \tensor and \multiscripts: typeset staggered upper/lower tensor indices.
% \mk@scripts scans the script list two tokens at a time, buffering consecutive
% superscripts in the token register \@sups until a subscript (or the "_/"
% terminator appended by the public macros) forces them to be emitted.
\makeatletter
\newif\if@sup
\newtoks\@sups
% Append one token to the pending-superscript buffer.
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
% Clear the buffer and the "superscripts pending" flag.
\def\reset@sup{\@supfalse\@sups={}}%
% Recursive scanner: #1 is a marker (e.g. _), #2 the next script token;
% "#2 = /" is the end sentinel, at which point pending superscripts are flushed.
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
% \tensor{base}{scripts}: base symbol followed by its staggered scripts.
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
% \multiscripts{pre}{base}{post}: scripts before and after the base symbol.
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother
% \slash: overlay a "/" on its argument (Feynman-slash / negation style).
\makeatletter
% Pre-box a math-mode slash once; reused (via \copy) at every call.
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
% Center the slash over #1: after typesetting the slash, kern back by half the
% slash width plus half the argument width, then set the argument on top.
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
% NOTE(review): this overwrites LaTeX's standard \slash (a breakable "/") --
% presumably intentional for itex input; confirm before reusing this preamble.
\def\slash{\protect\itex@pslash}
\makeatother
% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
% NOTE(review): mathtools (loaded above) already defines \mathllap, \mathrlap
% and \mathclap; these \def's silently overwrite them with equivalent versions.
% Zero-width centered box (text-mode analogue of \llap/\rlap).
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
% \mathpalette forwards the current math style so laps work in scripts too.
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
% #1 = current math style, #2 = material; \mathsurround=0pt kills stray spacing.
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}
% Save \root as \oldroot, then redefine \root to take two braced arguments:
% \root{#1}{#2} -> \oldroot #1 \of{#2}; \sqrt[#1]{#2} is rebuilt on top of it.
% NOTE(review): the original comment here claimed \sqrt is saved as \oldsqrt,
% but only \root is saved (as \oldroot); \sqrt is redefined directly.
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}
% Manually declare the txfonts symbolsC font, so individual symbols can be
% taken from it without loading the whole txfonts package.
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
% Fallback substitution if a requested shape of txsyc is unavailable.
\DeclareFontSubstitution{U}{txsyc}{m}{n}
% Manually declare the stmaryrd font (same idea: individual symbols only).
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}
% Manually declare the MnSymbolE font, including an explicit size/shape
% mapping (normally supplied by an .fd file).
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
% Map document font sizes to the optically sized MnSymbolE design sizes.
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
<-6> MnSymbolE5
<6-7> MnSymbolE6
<7-8> MnSymbolE7
<8-9> MnSymbolE8
<9-10> MnSymbolE9
<10-12> MnSymbolE10
<12-> MnSymbolE12}{}
% Declare specific arrows from txfonts without loading the full package
\makeatletter
% Like \DeclareMathSymbol, but first undefines the target so we never trip
% "already defined" errors when the symbol exists in another loaded package.
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
% Diagonal double arrows (symbolsC slots 116-119); the *Arr names are synonyms.
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
% \Perp and \Vbar are synonyms for the same glyph.
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
% NOTE(review): slot {"6} looks suspicious next to \bigsqcap's {"64}; verify
% \biginterleave's slot against stmaryrd.sty before relying on this glyph.
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
% \invamp and \parr are synonyms (linear-logic "par").
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother
% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Declare a math delimiter, clearing any existing definition first (the
% \if\relax\noexpand#1 test guards the \let against undefined names).
% Small and large variants both come from the same font/slot (#3/#4 twice).
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
% Octal slots in the mnomx (MnSymbolE) symbol font declared above.
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother
% Widecheck: a wide inverted hat, built by vertically flipping \widehat.
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
% #1 = current math style (from \mathpalette), #2 = accented material.
\def\@widecheck#1#2{%
% Box 0: the argument itself.
\setbox\z@\hbox{\m@th$#1#2$}%
% Box 2: a \widehat over invisible rules matching the argument's height and
% width -- i.e. just the hat accent, stretched to the right size.
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
% Vertical shift used to keep the flipped hat in place: (ht0 + 2*ht2)/3.
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
% Flip the hat upside down (\scalebox{1}[-1]) about the computed position.
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box
\tw@}}}%
% Overprint the flipped hat on the argument.
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother
% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
% xparse signature "moom": mandatory voffset, optional height, optional depth,
% mandatory content. Build the matching \raisebox call (omitting the optional
% arguments that were not given), then dispatch through \mathpalette so the
% content is typeset in the current math style (##1) around ##2.
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
% Fix: this line read "\makeatletter" in the original, so the @-catcode group
% opened above was never closed here (it only worked by accident because the
% next preamble block immediately opens its own \makeatletter).
\makeatother
% udots (taken from yhmath): diagonal dots rising from lower-left to
% upper-right -- the mirror image of \ddots.
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother
%% Fix array
% Without options, itex's \array is equivalent to the matrix environment
% (see the note at the top of this preamble).
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}
%% Renaming existing commands
% Compound helpers.
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
% Arrow aliases (itex names -> standard LaTeX arrows).
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
% Spelled-out relations (raw < and > are unsafe in itex/XML input).
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
% Upper-case Greek letters that coincide with Latin capitals have no LaTeX
% command of their own; map them to the corresponding Roman letter.
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
% Delimiter and big-operator aliases.
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
% Integral aliases.
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
% Miscellaneous symbol aliases.
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
% Size commands mapped to math styles.
\newcommand{\textsize}{\normalsize}
% NOTE(review): this clobbers LaTeX's text-mode \scriptsize with a math-style
% switch -- presumably safe because itex input is math-only; confirm before
% reusing this preamble in ordinary documents.
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
% itex interactivity commands: in print, render only the second argument.
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}
% Theorem Environments
% Each theorem-like environment comes in a numbered form and an unnumbered
% form (u-prefixed, declared with \newtheorem*).
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}
%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------
\section*{Blog - network theory (part 8)}
This page is a [[Blog articles in progress|blog article in progress]], written by [[John Baez]]. To see a discussion of this article while it was being written, \href{http://www.math.ntnu.no/~stacey/Mathforge/Azimuth/comments.php?DiscussionID=815&Focus=5456#Comment_5456}{visit the Azimuth Forum}. For the final polished version, \href{http://johncarlosbaez.wordpress.com/2011/09/09/network-theory-part-8/}{go to the Azimuth Blog}.
Summer vacation is over! Time to get back to work!
This month [[Brendan Fong]] is visiting the Centre for Quantum Technologies and working with me on stochastic Petri nets. He's proved two interesting results, which he wants to explain.
To understand what he's done, you need to know how to get the rate equation and the master equation from a stochastic Petri net. We've seen how. But it's been a long time since the last article in this series, so today I'll start with some review. And at the end, just for fun, I'll say a bit more about how Feynman diagrams show up in this theory.
Since I'm an experienced teacher, I'll assume you've forgotten everything. This has some advantages! I can change some of my earlier terminology---improve it a bit here and there---and you won't even notice.
\textbf{Definition.} A \textbf{Petri net} consists of a set $S$ of \textbf{species} and a set $T$ of \textbf{transitions}, together with a function
\begin{displaymath}
i : S \times T \to \mathbb{N}
\end{displaymath}
saying how many things of each species appear in the \textbf{input} for each transition, and a function
\begin{displaymath}
o: S \times T \to \mathbb{N}
\end{displaymath}
saying how many things of each species appear in the \textbf{output}.
We can draw pictures of Petri nets. For example, here's a Petri net with two species and three transitions:
It should be clear that the transition `predation' has one wolf and one rabbit as input, and two wolves as output.
A `stochastic' Petri net goes further: it also says the rate at which each transition occurs.
\textbf{Definition.} A \textbf{stochastic Petri net} is a Petri net together with a function
\begin{displaymath}
r: T \to [0,\infty)
\end{displaymath}
giving a \textbf{rate constant} for each transition.
Starting from any stochastic Petri net, we can get two things. First:
$\bullet$ The \textbf{master equation}. This says how the probability that we have a given number of things of each species changes with time.
$\bullet$ The \textbf{rate equation}. This says how the expected number of things of each species changes with time.
The master equation is stochastic: it describes how probabilities change with time. The rate equation is deterministic.
The master equation is more fundamental. It's like the equations of quantum electrodynamics, which describe the amplitudes for creating and annihilating individual photons. The rate equation is less fundamental. It's like the classical Maxwell equations, which describe changes in the electromagnetic field in a deterministic way. The classical Maxwell equations are an approximation to quantum electrodynamics. This approximation gets good where there are lots of photons all piling on top of each other to form nice waves.
Similarly, the rate equation can be derived from the master equation where the number of things of each species become large, and the fluctuations in these numbers become negligible.
But I won't do this derivation today! Nor will I probe more deeply into the analogy with quantum field theory, even though that's my ultimate goal. Today I'm content to remind you what the master equation and rate equation actually say.
The rate equation is simpler, so let's do that first.
Suppose we have a stochastic Petri net with $k$ different species. Let $x_i$ be the number of things of the $i$th species. Then the rate equation looks like this:
\begin{displaymath}
\frac{d x_i}{d t} = ???
\end{displaymath}
It's really a bunch of equations, one for each $1 \le i \le k$. But what is the right-hand side?
The right-hand side is a sum of terms, one for each transition in our Petri net. So, let's start by assuming our Petri net has just one transition.
Suppose the $i$th species appears as input to this transition $m_i$ times, and as output $n_i$ times. Then the rate equation is
\begin{displaymath}
\frac{d x_i}{d t} = r (n_i - m_i) x_1^{m_1} \cdots x_k^{m_k}
\end{displaymath}
where $r$ is the rate constant for this transition.
That's really all there is to it! But we can make it look nicer. Let's make up a vector
\begin{displaymath}
x = (x_1, \dots , x_k) \in [0,\infty)^k
\end{displaymath}
that says how many things there are of each species. Similarly let's make up an \textbf{input vector}
\begin{displaymath}
m = (m_1, \dots, m_k) \in \mathbb{N}^k
\end{displaymath}
and an \textbf{output vector}
\begin{displaymath}
n = (n_1, \dots, n_k) \in \mathbb{N}^k
\end{displaymath}
for our transition. To be cute, let's also define
\begin{displaymath}
x^m = x_1^{m_1} \cdots x_k^{m_k}
\end{displaymath}
Then we can write the rate equation for a single transition like this:
\begin{displaymath}
\frac{d x}{d t} = r (n-m) x^m
\end{displaymath}
Next let's do a general stochastic Petri net, with lots of transitions. Let's write $T$ for the set of transitions and $r(\tau)$ for the rate constant of the transition $\tau \in T$. Let $m(\tau)$ and $n(\tau)$ be the input and output vectors of the transition $\tau$. Then the rate equation is:
\begin{displaymath}
\frac{d x}{d t} = \sum_{\tau \in T} r(\tau) (n(\tau) - m(\tau)) x^{m(\tau)}
\end{displaymath}
For example, consider our rabbits and wolves:
Suppose
\begin{itemize}%
\item the rate constant for `birth' is $\beta$
\item the rate constant for `predation' is $\gamma$
\item the rate constant for `death' is $\delta$
\end{itemize}
Let $x_1(t)$ be the number of rabbits and $x_2(t)$ the number of wolves at time $t$. Then the rate equation looks like this:
\begin{displaymath}
\frac{d x_1}{d t} = \beta x_1 - \gamma x_1 x_2
\end{displaymath}
\begin{displaymath}
\frac{d x_2}{d t} = \gamma x_1 x_2 - \delta x_2
\end{displaymath}
Now let's do something new. In an earlier article in this series I explained how to write down the master equation for a stochastic Petri net with just one species. Now let's generalize that. Luckily, the ideas are exactly the same.
So, suppose we have a stochastic Petri net with $k$ different species. Let $\psi_{n_1, \dots, n_k}$ be the probability that we have $n_1$ things of the first species, $n_2$ of the second species, and so on. The master equation will say how all these probabilities change with time.
To keep the notation clean, let's introduce a vector
\begin{displaymath}
n = (n_1, \dots, n_k) \in \mathbb{N}^k
\end{displaymath}
and let
\begin{displaymath}
\psi_n = \psi_{n_1, \dots, n_k}
\end{displaymath}
Then, let's take all these probabilities and cook up a formal power series that has them as coefficients: as we've seen, this is a powerful trick. To do this, we'll bring in some variables $z_1, \dots, z_k$ and write
\begin{displaymath}
z^n = z_1^{n_1} \cdots z_k^{n_k}
\end{displaymath}
as a convenient abbreviation. Then any formal power series in these variables looks like this:
\begin{displaymath}
\Psi = \sum_{n \in \mathbb{N}^k} \psi_n z^n
\end{displaymath}
We call $\Psi$ a \textbf{state} if the probabilities sum to 1 as they should:
\begin{displaymath}
\sum_n \psi_n = 1
\end{displaymath}
The simplest example of a state is a monomial:
\begin{displaymath}
z^n = z_1^{n_1} \cdots z_k^{n_k}
\end{displaymath}
This is a state where we are 100\% sure that there are $n_1$ things of the first species, $n_2$ of the second species, and so on. We call such a state a \textbf{pure state}, since physicists use this term to describe a state where we know for sure exactly what's going on. Sometimes a general state, one that might not be pure, is called a \textbf{mixed state}.
The master equation says how a state evolves in time. It looks like this:
\begin{displaymath}
\frac{d}{d t} \Psi(t) = H \Psi(t)
\end{displaymath}
So, I just need to tell you what $H$ is!
It's called the \textbf{Hamiltonian}. It's a linear operator built from special operators that annihilate and create things of various species. Namely, for each species $1 \le i \le k$ we have an \textbf{annihilation operator}:
\begin{displaymath}
a_i \Psi = \frac{d}{d z_i} \Psi
\end{displaymath}
and a \textbf{creation operator}:
\begin{displaymath}
a_i^\dagger \Psi = z_i \Psi
\end{displaymath}
How do we build $H$ from these? Suppose we've got a stochastic Petri net whose set of transitions is $T$. As before, write $r(\tau)$ for the rate constant of the transition $\tau \in T$, and let $m(\tau)$ and $n(\tau)$ be the input and output vectors of this transition. Then:
\begin{displaymath}
H = \sum_{\tau \in T} r(\tau) \, ({a^\dagger}^{n(\tau)} - {a^\dagger}^{m(\tau)}) \, a^{m(\tau)}
\end{displaymath}
where as usual we've introduced some shorthand notations to keep from going insane. For example:
\begin{displaymath}
a^{m(\tau)} = a_1^{m_1(\tau)} \cdots a_k^{m_k(\tau)}
\end{displaymath}
and
\begin{displaymath}
{a^\dagger}^{m(\tau)} = {a_1^\dagger }^{m_1(\tau)} \cdots {a_k^\dagger}^{m_k(\tau)}
\end{displaymath}
Now, it's not surprising that each transition $\tau$ contributes a term to $H$. It's also not surprising that this term is proportional to the rate constant $r(\tau)$. The only tricky thing is the expression
\begin{displaymath}
({a^\dagger}^{n(\tau)} - {a^\dagger}^{m(\tau)})a^{m(\tau)}
\end{displaymath}
How can we understand it? The basic idea is this. We've got two terms here. The first term:
\begin{displaymath}
{a^\dagger}^{n(\tau)} a^{m(\tau)}
\end{displaymath}
describes how $m_i(\tau)$ things of the $i$th species get annihilated, and $n_i(\tau)$ things of the $i$th species get created. Of course this happens thanks to our transition $\tau$. The second term:
\begin{displaymath}
- {a^\dagger}^{m(\tau)} a^{m(\tau)}
\end{displaymath}
is a bit harder to understand, but it says how the probability that nothing happens---that we remain in the same pure state---decreases as time passes. Again this happens due to our transition $\tau$.
In fact, the second term must take precisely the form it does to ensure `conservation of total probability'. In other words: if the probabilities $\psi_n$ sum to 1 at time zero, we want these probabilities to still sum to 1 at any later time. And for this, we need that second term to be what it is! In an earlier article in this series we saw this in the special case where there's only one species. The general case works the same way.
Let's look at an example. Consider our rabbits and wolves yet again:
and again suppose the rate constants for birth, predation and death are $\beta$, $\gamma$ and $\delta$, respectively. We have
\begin{displaymath}
\Psi = \sum_n \psi_n z^n
\end{displaymath}
where $\psi_n = \psi_{n_1, n_2}$ is the probability of having $n_1$ rabbits and $n_2$ wolves. These probabilities evolve according to the equation
\begin{displaymath}
\frac{d}{d t} \Psi(t) = H \Psi(t)
\end{displaymath}
where the Hamiltonian is
\begin{displaymath}
H = \beta B + \gamma C + \delta D
\end{displaymath}
and $B$, $C$ and $D$ are operators describing birth, predation and death, respectively. ($B$ is for birth, $D$ is for death\ldots{} and you can call predation `consumption' if you want something that starts with $C$.) What are these operators? Just follow the rules I described:
\begin{displaymath}
B = {a_1^\dagger}^2 a_1 - a_1^\dagger a_1
\end{displaymath}
\begin{displaymath}
C = {a_2^\dagger}^2 a_1 a_2 - a_1^\dagger a_2^\dagger a_1 a_2
\end{displaymath}
\begin{displaymath}
D = a_2 - a_2^\dagger a_2
\end{displaymath}
In each case, the first term is easy to understand:
\begin{itemize}%
\item Birth annihilates one rabbit and creates two rabbits.
\item Predation annihilates one rabbit and one wolf and creates two wolves.
\item Death annihilates one wolf.
\end{itemize}
The second term is trickier, but I told you how it works.
How do we solve the master equation? If we don't worry about mathematical rigor too much, it's easy. The solution of
\begin{displaymath}
\frac{d}{d t} \Psi(t) = H \Psi(t)
\end{displaymath}
should be
\begin{displaymath}
\Psi(t) = e^{t H} \Psi(0)
\end{displaymath}
and we can hope that
\begin{displaymath}
e^{t H} = 1 + t H + \frac{(t H)^2}{2!} + \cdots
\end{displaymath}
so that
\begin{displaymath}
\Psi(t) = \Psi(0) + t H \Psi(0) + \frac{t^2}{2!} H^2 \Psi(0) + \cdots
\end{displaymath}
Of course there's always the question of whether this power series converges. In many contexts it doesn't, but that's not necessarily a disaster: the series can still be asymptotic to the right answer, or Borel summable to the right answer.
But let's not worry about these subtleties yet! Let's just imagine our rabbits and wolves, with Hamiltonian
\begin{displaymath}
H = \beta B + \gamma C + \delta D
\end{displaymath}
Now, imagine working out
\begin{displaymath}
\Psi(t) = \Psi(0) + t H \Psi(0) + \frac{t^2}{2!} H^2 \Psi(0) + \frac{t^3}{3!} H^3 \Psi(0) + \cdots
\end{displaymath}
We'll get lots of terms involving products of $B$, $C$ and $D$ hitting our original state $\Psi(0)$. And we can draw these as diagrams! For example, suppose we start with one rabbit and one wolf. Then
\begin{displaymath}
\Psi(0) = z_1 z_2
\end{displaymath}
And suppose we want to compute
\begin{displaymath}
H^3 \Psi(0) = (\beta B + \gamma C + \delta D)^3 \Psi(0)
\end{displaymath}
as part of the task of computing $\Psi(t)$. Then we'll get lots of terms: 27, to be precise. Let's take one of these terms, for example the one proportional to:
\begin{displaymath}
D C B \Psi(0)
\end{displaymath}
We can draw this as a sum of Feynman diagrams, including this:
In this diagram, we start with one rabbit and one wolf at top. As we read the diagram from top to bottom, first a rabbit is born ($B$), then predation occurs ($C$), and finally a wolf dies ($D$). The end result is again a rabbit and a wolf.
This is just one of four Feynman diagrams we should draw in our sum for $D C B \Psi(0)$, since either of the two rabbits could have been eaten, and either wolf could have died. So, the end result of computing
\begin{displaymath}
H^3 \Psi(0)
\end{displaymath}
will involve a lot of Feynman diagrams\ldots{} and of course computing
\begin{displaymath}
\Psi(t) = \Psi(0) + t H \Psi(0) + \frac{t^2}{2!} H^2 \Psi(0) + \frac{t^3}{3!} H^3 \Psi(0) + \cdots
\end{displaymath}
will involve even more, even if we get tired and give up after the first few terms. So, this Feynman diagram business may seem quite tedious\ldots{} and it may not be obvious how it helps.
But it does, sometimes!
Now is not the time for me to describe the `practical' benefits of Feynman diagrams. Instead, I'll just point out one conceptual benefit. We started with what seemed like a purely computational chore, namely computing
\begin{displaymath}
\Psi(t) = \Psi(0) + t H \Psi(0) + \frac{t^2}{2!} H^2 \Psi(0) + \cdots
\end{displaymath}
But then we saw---at least roughly---how this series could be seen as having a clear meaning! It can be written as a sum over diagrams, each of which represents a possible history of the rabbits and wolves. So, it's what physicists call a \textbf{sum over histories}.
Feynman invented the idea of a sum over histories in the context of quantum field theory. At the time this idea seemed quite mind-blowing, for various reasons. First, it involved elementary particles instead of everyday things like rabbits and wolves. Second, it involved complex `amplitudes' instead of real probabilities. Third, it actually involved integrals instead of sums. And fourth, a lot of these integrals diverged, giving infinite answers that needed to be `cured' somehow!
Now we're seeing a sum over histories in a more down-to-earth context without all these complications. A lot of the underlying math is analogous\ldots{} but now there's nothing mind-blowing about it: it's quite easy to understand!
category: blog
\end{document}