\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}

%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)

% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}

% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.

% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
\advance\itex@thd\the\itex@dp%
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother

% \tensor and \multiscript
\makeatletter
\newif\if@sup
\newtoks\@sups
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
\def\reset@sup{\@supfalse\@sups={}}%
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother

% \slash
\makeatletter
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
\def\slash{\protect\itex@pslash}
\makeatother

% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}

% Renames \sqrt as \oldsqrt and redefine root to result in \sqrt[#1]{#2}
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}

% Manually declare the txfonts symbolsC font
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}

% Manually declare the stmaryrd font
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}

% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{ <-6> MnSymbolE5 <6-7> MnSymbolE6 <7-8> MnSymbolE7 <8-9> MnSymbolE8 <9-10> MnSymbolE9 <10-12> MnSymbolE10 <12-> MnSymbolE12}{}

% Declare specific arrows from txfonts without loading the full package
\makeatletter
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother

% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother

% Widecheck
\makeatletter
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
\setbox\z@\hbox{\m@th$#1#2$}%
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@ \vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box \tw@}}}%
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother

% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
\makeatletter

% udots (taken from yhmath)
\makeatletter
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother

%% Fix array
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}

%% Renaming existing commands
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
\newcommand{\textsize}{\normalsize}
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}

% Theorem Environments
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}

%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------

\section*{Blog - network theory (part 9)}

This page is a blog article in progress, written by John Baez and Brendan Fong. To see discussions of this article while it was being written, \href{http://www.math.ntnu.no/~stacey/Mathforge/Azimuth/comments.php?DiscussionID=823&Focus=5489#Comment_5489}{visit the Azimuth Forum}. For the final polished article, \href{http://johncarlosbaez.wordpress.com/2011/09/13/network-theory-part-9/}{go to the Azimuth Blog}.

\emph{\textbf{jointly written with Brendan Fong}}

Last time we reviewed the rate equation and the master equation. Both of them describe reactions where things of various kinds can react and turn into other things. But:

$\bullet$ In the rate equation, we assume the number of things varies continuously and is known precisely.

$\bullet$ In the master equation, we assume the number of things varies discretely and is known only probabilistically.

This should remind you of the difference between classical mechanics and quantum mechanics. However, the master equation is not quantum, it's stochastic: it involves probabilities, but there's no uncertainty principle going on. Still, a lot of the math is similar.

Now, given an equilibrium solution to the rate equation---one that doesn't change with time---we'll try to find a solution to the master equation with the same property. We won't always succeed---but we often can! The theorem saying how to do this was proved here:

$\bullet$ D. F. Anderson, G. Craciun and T. G. Kurtz, Product-form stationary distributions for deficiency zero chemical reaction networks.

We will translate their proof into the language of annihilation and creation operators. This emphasizes the analogy to quantum mechanics. In particular, our equilibrium solution of the master equation is just like what people call a `coherent state' in quantum mechanics. So, if you know about quantum mechanics and coherent states, you should be happy. But if you don't, fear not!---we won't assume any knowledge of that stuff.

To construct our equilibrium solution of the master equation, we need a special type of solution to our rate equation. We call this type a `complex balanced solution'. This essentially means that not only is the net rate of production of each species zero, but the net rate of production of each possible bunch of species is also zero.

Before we make this more precise, let's remind ourselves of the basic setup.

We'll consider a stochastic Petri net with a finite set $S$ of species and a finite set $T$ of transitions. For convenience let's take $S = \{1,\dots, k\}$, so our species are numbered from 1 to $k$. Then each transition $\tau$ has an input vector $m(\tau) \in \mathbb{N}^k$ and an output vector $n(\tau) \in \mathbb{N}^k$. These say how many things of each species go in, and how many go out. Each transition also has a rate constant $r(\tau) \in [0,\infty)$, which says how rapidly it happens.

The rate equation concerns a vector $x(t) \in [0,\infty)^k$ whose $i$th component is the number of things of the $i$th species at time $t$. Note: we're assuming this number of things varies continuously and is known precisely! This should remind you of classical mechanics. So, we'll call $x(t)$, or indeed any vector in $[0,\infty)^k$, a \textbf{classical state}.

The \textbf{rate equation} says how the classical state $x(t)$ changes with time:
\begin{displaymath} \frac{d x}{d t} = \sum_{\tau \in T} r(\tau)\, (n(\tau)-m(\tau)) \, x^{m(\tau)} \end{displaymath}
You may wonder what $x^{m(\tau)}$ means: after all, we're taking a vector to a vector power! It's just an abbreviation, which we've seen plenty of times before. If $x \in \mathbb{R}^k$ is a list of numbers and $m \in \mathbb{N}^k$ is a list of natural numbers, we define
\begin{displaymath} x^m = x_1^{m_1} \cdots x_k^{m_k} \end{displaymath}
We'll also use this notation when $x$ is a list of operators.

The vectors $m(\tau)$ and $n(\tau)$ are examples of what chemists call \textbf{complexes}. A complex is just a bunch of things of each species. For example, if the set $S$ consists of three species, the complex $(1,0,5)$ can be thought of as a bunch consisting of one thing of the first species, none of the second species, and five of the third species. For our Petri net, the set of complexes is the set $\mathbb{N}^k$, and the complexes of particular interest are the $m(\tau)$ and the $n(\tau)$ of each transition $\tau$.

We shall say a classical state $c \in [0,\infty)^k$ is \textbf{complex balanced} if for all complexes $\kappa \in \mathbb{N}^k$ we have
\begin{equation} \sum_{\{\tau : m(\tau) = \kappa\}} r(\tau) c^{m(\tau)} =\sum_{\{\tau : n(\tau) = \kappa\}} r(\tau) c^{m(\tau)} \label{complex_balanced}\end{equation}
The left hand side of the above equation, which sums over the transitions in which the input complex is $\kappa$, gives the rate of consumption of the complex $\kappa$. The right hand side, which sums over the transitions in which the output complex is $\kappa$, gives the rate of production of $\kappa$. Thus the above equation requires that the net rate of production of the complex $\kappa$ is zero when the number of things of each species is given by the vector $c$.
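
If you like to compute, here is a minimal sketch of these definitions in Python. The little two-species Petri net, its rate constants, the test state $c$, and the helper functions below are all made up just for illustration; the code simply evaluates the right-hand side of the rate equation and checks the complex balanced condition at $c$.

\begin{verbatim}
import numpy as np
from collections import defaultdict

# A stochastic Petri net: each transition has an input vector m, an output
# vector n (both in N^k), and a rate constant r.  This two-species net and
# its numbers are made up purely for illustration.
transitions = [
    (np.array([1, 0]), np.array([0, 2]), 0.3),  # one of species 1 -> two of species 2
    (np.array([0, 2]), np.array([1, 0]), 0.5),  # two of species 2 -> one of species 1
]

def monomial(x, m):
    """x^m = x_1^{m_1} ... x_k^{m_k}."""
    return float(np.prod(np.asarray(x, dtype=float) ** m))

def rate_equation_rhs(x, transitions):
    """dx/dt = sum over transitions of r (n - m) x^m."""
    return sum(r * (n - m) * monomial(x, m) for m, n, r in transitions)

def is_complex_balanced(c, transitions, tol=1e-9):
    """For every complex kappa, compare the total rate of transitions
    consuming kappa with the total rate of transitions producing kappa,
    both evaluated at the classical state c."""
    consumed = defaultdict(float)
    produced = defaultdict(float)
    for m, n, r in transitions:
        flow = r * monomial(c, m)
        consumed[tuple(m)] += flow
        produced[tuple(n)] += flow
    complexes = set(consumed) | set(produced)
    return all(abs(consumed[kappa] - produced[kappa]) < tol for kappa in complexes)

c = np.array([5/3, 1.0])                    # chosen so that 0.3 c_1 = 0.5 c_2^2
print(rate_equation_rhs(c, transitions))    # approximately [0, 0]
print(is_complex_balanced(c, transitions))  # True
\end{verbatim}

Any classical state $c$ that passes this test plays a special role, as the following puzzle shows.
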
\textbf{Puzzle 1.} Show that if a classical state $c \in [0,\infty)^k$ is complex balanced, and we set $x(t) = c$ for all $t$, then $x(t)$ is a solution of the rate equation. Since $x(t)$ doesn't change with time here, we call it an \textbf{equilibrium solution} of the rate equation. Since $x(t) = c$ is complex balanced, we call it a \textbf{complex balanced} equilibrium solution.

\hypertarget{the_master_equation}{}\subsubsection*{{The master equation}}\label{the_master_equation}

We've seen that any complex balanced classical state gives an equilibrium solution of the rate equation. The Anderson--Craciun--Kurtz Theorem says that it also gives an equilibrium solution of the master equation.

The master equation concerns a formal power series
\begin{displaymath} \Psi(t) = \sum_{n \in \mathbb{N}^k} \psi_n(t) z^n \end{displaymath}
where
\begin{displaymath} z^n = z_1^{n_1} \cdots z_k^{n_k} \end{displaymath}
and
\begin{displaymath} \psi_n(t) = \psi_{n_1, \dots,n_k}(t) \end{displaymath}
is the probability that at time $t$ our system has $n_1$ things of the first species, $n_2$ of the second species, and so on.

Note: now we're assuming this number of things varies discretely and is known only probabilistically! So, we'll call $\Psi(t)$, or indeed any formal power series of this sort where the coefficients are probabilities summing to 1, a \textbf{stochastic state}. (Earlier we just called it a `state', but that would get confusing now!)

The \textbf{master equation} says how the stochastic state $\Psi(t)$ changes with time:
\begin{displaymath} \frac{d}{d t} \Psi(t) = H \Psi(t) \end{displaymath}
where the Hamiltonian $H$ is:
\begin{equation} H = \sum_{\tau \in T} r(\tau) \, \left({a^\dagger}^{n(\tau)} - {a^\dagger}^{m(\tau)} \right) \, a^{m(\tau)} \label{master}\end{equation}
The notation here is designed to neatly summarize some big products of annihilation and creation operators. For any vector $n \in \mathbb{N}^k$, we have
\begin{displaymath} a^n = a_1^{n_1} \cdots a_k^{n_k} \end{displaymath}
and
\begin{displaymath} {a^\dagger}^n = {a_1^\dagger }^{n_1} \cdots {a_k^\dagger}^{n_k} \end{displaymath}

Now suppose $c \in [0,\infty)^k$ is a complex balanced equilibrium solution of the rate equation. We want to get an equilibrium solution of the master equation. How do we do it?

For any $c \in [0,\infty)^k$ there is a stochastic state called a \textbf{coherent state}, defined by
\begin{displaymath} \Psi_c = \frac{e^{c z}}{e^c} \end{displaymath}
Here we are using some very terse abbreviations. Namely, we are defining
\begin{displaymath} e^{c} = e^{c_1} \cdots e^{c_k} \end{displaymath}
and
\begin{displaymath} e^{c z} = e^{c_1 z_1} \cdots e^{c_k z_k} \end{displaymath}
Equivalently,
\begin{displaymath} e^{c z} = \sum_{n \in \mathbb{N}^k} \frac{c^n}{n!}z^n \end{displaymath}
where $c^n$ and $z^n$ are defined as products in our usual way, and
\begin{displaymath} n! = n_1! \, \cdots \, n_k! \end{displaymath}
Either way, if you unravel the abbreviations, here's what you get:
\begin{displaymath} \Psi_c = e^{-(c_1 + \cdots + c_k)} \, \sum_{n \in \mathbb{N}^k} \frac{c_1^{n_1} \cdots c_k^{n_k}} {n_1! \, \cdots \, n_k! } \, z_1^{n_1} \cdots z_k^{n_k} \end{displaymath}
Maybe now you see why we like the abbreviations.

The name `coherent state' comes from quantum mechanics. In quantum mechanics, we think of a coherent state $\Psi_c$ as the `quantum state' that best approximates the classical state $c$. But we're not doing quantum mechanics now, we're doing probability theory. $\Psi_c$ isn't a `quantum state', it's a stochastic state. In probability theory, people like Poisson distributions.
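
Again, if you like to compute, here is a rough numerical sketch of the Hamiltonian and the coherent state for a single species ($k = 1$), representing a formal power series by its vector of coefficients up to some cutoff degree $N$. The cutoff, the two toy transitions (one thing appearing from nothing, one thing disappearing), and the rate constants are made up for illustration, and the truncation means everything holds only up to a tiny boundary error.

\begin{verbatim}
import numpy as np
from math import factorial
from numpy.linalg import matrix_power

N = 60                        # truncation: keep coefficients of z^0, ..., z^N
alpha, beta = 2.0, 1.0        # made-up rate constants

# One species (k = 1).  Two toy transitions, written as (m, n, r):
# "birth"  0 -> 1  at rate alpha, and  "death"  1 -> 0  at rate beta.
transitions = [(0, 1, alpha), (1, 0, beta)]

# Annihilation a = d/dz and creation a^dagger = multiplication by z,
# as matrices acting on the coefficient vector (psi_0, ..., psi_N):
A    = np.diag(np.arange(1.0, N + 1), k=1)   # (a psi)_n = (n+1) psi_{n+1}
Adag = np.diag(np.ones(N), k=-1)             # (a^dagger psi)_n = psi_{n-1}

# Hamiltonian H = sum over tau of r ( a^dagger^{n} - a^dagger^{m} ) a^{m}
H = sum(r * (matrix_power(Adag, n) - matrix_power(Adag, m)) @ matrix_power(A, m)
        for m, n, r in transitions)

# Coherent state Psi_c: psi_n = e^{-c} c^n / n!
c = alpha / beta              # the complex balanced equilibrium of this toy net
psi_c = np.array([np.exp(-c) * c**n / factorial(n) for n in range(N + 1)])

print(psi_c.sum())               # close to 1: the psi_n are probabilities
print(np.abs(H @ psi_c).max())   # close to 0, up to truncation and rounding
\end{verbatim}

The last two lines anticipate what comes next: the coefficients of $\Psi_c$ are Poisson probabilities summing to 1, and, as the theorem below asserts, $H \Psi_c$ vanishes when $c$ is complex balanced.
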
In the state $\Psi_c$, the probability of having $n_i$ things of the $i$th species is equal to
\begin{displaymath} e^{-c_i} \, \frac{c_i^{n_i}}{n_i!} \end{displaymath}
This is precisely the definition of a \textbf{Poisson distribution} with mean equal to $c_i$. We can multiply a bunch of factors like this, one for each species, to get
\begin{displaymath} e^{-c} \frac{c^n}{n!} \end{displaymath}
This is the probability of having $n_1$ things of the first species, $n_2$ things of the second, and so on, in the state $\Psi_c$. So, the state $\Psi_c$ is a product of independent Poisson distributions. In particular, knowing how many things there are of one species tells you nothing about how many things there are of any other species!

It is remarkable that such a simple state can give an equilibrium solution of the master equation, even for very complicated stochastic Petri nets. But it's true---at least if $c$ is complex balanced. Now we're ready to state and prove the big result:

\textbf{Theorem (Anderson--Craciun--Kurtz).} Suppose $c \in [0,\infty)^k$ is a complex balanced equilibrium solution of the rate equation. Then $H \Psi_c = 0$.

It follows that $\Psi_c$ is an equilibrium solution of the master equation. In other words, if we take $\Psi(t) = \Psi_c$ for all times $t$, the master equation holds:
\begin{displaymath} \frac{d}{d t} \Psi(t) = H \Psi(t) \end{displaymath}
since both sides are zero.

\textbf{Proof.} To prove the Anderson--Craciun--Kurtz Theorem, we just need to show that $H \Psi_c = 0$. Since $\Psi_c$ is a constant times $e^{c z}$, it suffices to show $H e^{c z} = 0$. Remember that
\begin{displaymath} H e^{c z} = \sum_{\tau \in T} r(\tau) \left( {a^\dagger}^{n(\tau)} -{a^\dagger}^{m(\tau)} \right) \, a^{m(\tau)} \, e^{c z} \end{displaymath}
Since the annihilation operator $a_i$ is given by differentiation with respect to $z_i$, while the creation operator $a^\dagger_i$ is just multiplying by $z_i$, we have:
\begin{displaymath} H e^{c z} = \sum_{\tau \in T} r(\tau) \, c^{m(\tau)} \left( z^{n(\tau)} - z^{m(\tau)} \right) e^{c z} \end{displaymath}
Expanding out $e^{c z}$ we get:
\begin{displaymath} H e^{c z} = \sum_{i \in \mathbb{N}^k} \sum_{\tau \in T} r(\tau)c^{m(\tau)}\left(z^{n(\tau)}\frac{c^i}{i!}z^i - z^{m(\tau)}\frac{c^i}{i!}z^i\right) \end{displaymath}
Shifting indices, and defining terms with negative exponents to be zero:
\begin{displaymath} H e^{c z} = \sum_{i \in \mathbb{N}^k} \sum_{\tau \in T} r(\tau)c^{m(\tau)}\left(\frac{c^{i-n(\tau)}}{(i-n(\tau))!}z^i - \frac{c^{i-m(\tau)}}{(i-m(\tau))!}z^i\right) \end{displaymath}
So, to show $H e^{c z} = 0$, we need to show this:
\begin{displaymath} \sum_{i \in \mathbb{N}^k} \sum_{\tau \in T} r(\tau)c^{m(\tau)}\frac{c^{i-n(\tau)}}{(i-n(\tau))!}z^i =\sum_{i \in \mathbb{N}^k} \sum_{\tau \in T} r(\tau)c^{m(\tau)}\frac{c^{i-m(\tau)}}{(i-m(\tau))!}z^i \end{displaymath}
We do this by splitting the sum over $T$ according to output and then input complexes, making use of the complex balanced condition:
\begin{displaymath}
\begin{array}{ccl}
\sum_{i \in \mathbb{N}^k} \sum_{\kappa \in \mathbb{N}^k} \sum_{\{\tau : n(\tau)=\kappa\}} r(\tau)c^{m(\tau)}\frac{c^{i-n(\tau)}}{(i-n(\tau))!} \, z^i \;
&=& \sum_{i \in \mathbb{N}^k} \sum_{\kappa \in \mathbb{N}^k} \frac{c^{i-\kappa}}{(i-\kappa)!}z^i \sum_{\{\tau : n(\tau) = \kappa\}} r(\tau)c^{m(\tau)} \\
&=& \sum_{i \in \mathbb{N}^k} \sum_{\kappa \in \mathbb{N}^k} \frac{c^{i-\kappa}}{(i-\kappa)!}z^i \sum_{\{\tau : m(\tau) = \kappa\}} r(\tau)c^{m(\tau)} \\
&=& \sum_{i \in \mathbb{N}^k} \sum_{\kappa \in \mathbb{N}^k} \sum_{\{\tau : m(\tau) = \kappa\}}
r(\tau)c^{m(\tau)}\frac{c^{i-m(\tau)}}{(i-m(\tau))!} \, z^i
\end{array}
\end{displaymath}
This completes the proof!

We've already seen one example of this theorem, back in an earlier part of this series. We had a stochastic Petri net describing amoebas, with a reproduction transition turning one amoeba into two, and a competition transition turning two amoebas into one. We saw that the rate equation is just the logistic equation, familiar from population biology. The equilibrium solution is complex balanced, because pairs of amoebas are getting created at the same rate as they're getting destroyed, and amoebas are getting created at the same rate as they're getting destroyed.

So, the Anderson--Craciun--Kurtz Theorem guarantees that there's an equilibrium solution of the master equation where the number of amoebas is distributed according to a Poisson distribution. And, we actually found this equilibrium solution! Next time we'll look at another example.

\end{document}