\documentclass[12pt,titlepage]{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{mathtools}
\usepackage{graphicx}
\usepackage{color}
\usepackage{ucs}
\usepackage[utf8x]{inputenc}
\usepackage{xparse}
\usepackage{hyperref}
%----Macros----------
%
% Unresolved issues:
%
% \righttoleftarrow
% \lefttorightarrow
%
% \color{} with HTML colorspec
% \bgcolor
% \array with options (without options, it's equivalent to the matrix environment)
% Of the standard HTML named colors, white, black, red, green, blue and yellow
% are predefined in the color package. Here are the rest.
\definecolor{aqua}{rgb}{0, 1.0, 1.0}
\definecolor{fuschia}{rgb}{1.0, 0, 1.0}
\definecolor{gray}{rgb}{0.502, 0.502, 0.502}
\definecolor{lime}{rgb}{0, 1.0, 0}
\definecolor{maroon}{rgb}{0.502, 0, 0}
\definecolor{navy}{rgb}{0, 0, 0.502}
\definecolor{olive}{rgb}{0.502, 0.502, 0}
\definecolor{purple}{rgb}{0.502, 0, 0.502}
\definecolor{silver}{rgb}{0.753, 0.753, 0.753}
\definecolor{teal}{rgb}{0, 0.502, 0.502}
% Because of conflicts, \space and \mathop are converted to
% \itexspace and \operatorname during preprocessing.
% itex: \space{ht}{dp}{wd}
%
% Height and baseline depth measurements are in units of tenths of an ex while
% the width is measured in tenths of an em.
\makeatletter
% \itexspace{ht}{dp}{wd}: an invisible strut of the given height, depth and
% width.  Per the header comment above, ht/dp are in tenths of an ex and wd
% in tenths of an em; the 0.1 multiplications below do that conversion.
\newdimen\itex@wd%
\newdimen\itex@dp%
\newdimen\itex@thd%
\def\itexspace#1#2#3{\itex@wd=#3em%
\itex@wd=0.1\itex@wd%
\itex@dp=#2ex%
\itex@dp=0.1\itex@dp%
\itex@thd=#1ex%
\itex@thd=0.1\itex@thd%
% total rule height = requested height + depth
\advance\itex@thd\the\itex@dp%
% an empty box of the requested width containing a zero-width rule (strut)
% raised so that it extends \itex@dp below the baseline
\makebox[\the\itex@wd]{\rule[-\the\itex@dp]{0cm}{\the\itex@thd}}}
\makeatother
% \tensor and \multiscript
\makeatletter
% Machinery for prescripts/postscripts on tensors.  A script list is a
% sequence of tokens terminated by "_/"; "_" switches to a subscript and
% flushes any accumulated superscript tokens, "/" ends the list.
\newif\if@sup
\newtoks\@sups
% Append #1 to the accumulated superscript token register \@sups.
\def\append@sup#1{\edef\act{\noexpand\@sups={\the\@sups #1}}\act}%
% Clear the superscript accumulator and its "pending" flag.
\def\reset@sup{\@supfalse\@sups={}}%
% Scan two tokens at a time: on "/" emit any pending superscript and stop;
% on "_" emit the pending superscript (if any) then a subscript; otherwise
% accumulate the token as superscript material and keep scanning.
\def\mk@scripts#1#2{\if #2/ \if@sup ^{\the\@sups}\fi \else%
\ifx #1_ \if@sup ^{\the\@sups}\reset@sup \fi {}_{#2}%
\else \append@sup#2 \@suptrue \fi%
\expandafter\mk@scripts\fi}
% \tensor{base}{scripts}: base followed by alternating post-scripts.
\def\tensor#1#2{\reset@sup#1\mk@scripts#2_/}
% \multiscripts{prescripts}{base}{postscripts}.
\def\multiscripts#1#2#3{\reset@sup{}\mk@scripts#1_/#2%
\reset@sup\mk@scripts#3_/}
\makeatother
% \slash
\makeatletter
% A "slashed" symbol (e.g. Feynman-slash notation): overprint "/" on the
% argument, centered on it.
\newbox\slashbox \setbox\slashbox=\hbox{$/$}
\def\itex@pslash#1{\setbox\@tempboxa=\hbox{$#1$}
% back up by half the slash width plus half the argument width, so the two
% boxes share a common center
\@tempdima=0.5\wd\slashbox \advance\@tempdima 0.5\wd\@tempboxa
\copy\slashbox \kern-\@tempdima \box\@tempboxa}
% NOTE(review): this \def silently overrides LaTeX's standard \slash
% (a breakable "/"); intentional for itex compatibility.
\def\slash{\protect\itex@pslash}
\makeatother
% math-mode versions of \rlap, etc
% from Alexander Perlis, "A complement to \smash, \llap, and lap"
% http://math.arizona.edu/~aprl/publications/mathclap/
% NOTE(review): mathtools (loaded above) already defines \mathllap,
% \mathrlap and \mathclap; these \def's silently override its versions
% with the equivalent Perlis implementations.
% \clap centers its argument in a zero-width box.
\def\clap#1{\hbox to 0pt{\hss#1\hss}}
% \mathpalette passes the current math style as ##1 so the lapped
% material is typeset at the correct size.
\def\mathllap{\mathpalette\mathllapinternal}
\def\mathrlap{\mathpalette\mathrlapinternal}
\def\mathclap{\mathpalette\mathclapinternal}
\def\mathllapinternal#1#2{\llap{$\mathsurround=0pt#1{#2}$}}
\def\mathrlapinternal#1#2{\rlap{$\mathsurround=0pt#1{#2}$}}
\def\mathclapinternal#1#2{\clap{$\mathsurround=0pt#1{#2}$}}
% Saves the primitive \root as \oldroot, then redefines \root{#1}{#2} and
% \sqrt[#1]{#2} to both produce an #1-th root via \oldroot ... \of{...}.
% (NOTE(review): an earlier version of this comment said "\sqrt as
% \oldsqrt", but it is \root that is saved, as \oldroot.)
\let\oldroot\root
\def\root#1#2{\oldroot #1 \of{#2}}
\renewcommand{\sqrt}[2][]{\oldroot #1 \of{#2}}
% Manually declare the txfonts symbolsC font
% (gives access to individual txfonts symbols without loading the full
% txfonts package, which would change the text/math fonts globally)
\DeclareSymbolFont{symbolsC}{U}{txsyc}{m}{n}
\SetSymbolFont{symbolsC}{bold}{U}{txsyc}{bx}{n}
\DeclareFontSubstitution{U}{txsyc}{m}{n}
% Manually declare the stmaryrd font (same rationale as above)
\DeclareSymbolFont{stmry}{U}{stmry}{m}{n}
\SetSymbolFont{stmry}{bold}{U}{stmry}{b}{n}
% Manually declare the MnSymbolE font
\DeclareFontFamily{OMX}{MnSymbolE}{}
\DeclareSymbolFont{mnomx}{OMX}{MnSymbolE}{m}{n}
\SetSymbolFont{mnomx}{bold}{OMX}{MnSymbolE}{b}{n}
% Map design sizes to the discrete MnSymbolE font files.
\DeclareFontShape{OMX}{MnSymbolE}{m}{n}{
<-6> MnSymbolE5
<6-7> MnSymbolE6
<7-8> MnSymbolE7
<8-9> MnSymbolE8
<9-10> MnSymbolE9
<10-12> MnSymbolE10
<12-> MnSymbolE12}{}
% Declare specific arrows from txfonts without loading the full package
\makeatletter
% Like \DeclareMathSymbol, but first \let the command to \undefined so the
% declaration cannot fail with an "already defined" error when the symbol
% exists (e.g. from amssymb).
\def\re@DeclareMathSymbol#1#2#3#4{%
\let#1=\undefined
\DeclareMathSymbol{#1}{#2}{#3}{#4}}
% Diagonal double arrows (two names each, itex aliases).
\re@DeclareMathSymbol{\neArrow}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\neArr}{\mathrel}{symbolsC}{116}
\re@DeclareMathSymbol{\seArrow}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\seArr}{\mathrel}{symbolsC}{117}
\re@DeclareMathSymbol{\nwArrow}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\nwArr}{\mathrel}{symbolsC}{118}
\re@DeclareMathSymbol{\swArrow}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\swArr}{\mathrel}{symbolsC}{119}
\re@DeclareMathSymbol{\nequiv}{\mathrel}{symbolsC}{46}
\re@DeclareMathSymbol{\Perp}{\mathrel}{symbolsC}{121}
\re@DeclareMathSymbol{\Vbar}{\mathrel}{symbolsC}{121}
% Symbols from stmaryrd (slot numbers: decimal unless prefixed with ").
\re@DeclareMathSymbol{\sslash}{\mathrel}{stmry}{12}
\re@DeclareMathSymbol{\bigsqcap}{\mathop}{stmry}{"64}
\re@DeclareMathSymbol{\biginterleave}{\mathop}{stmry}{"6}
% Linear-logic "par" (inverted ampersand), two names.
\re@DeclareMathSymbol{\invamp}{\mathrel}{symbolsC}{77}
\re@DeclareMathSymbol{\parr}{\mathrel}{symbolsC}{77}
\makeatother
% \llangle, \rrangle, \lmoustache and \rmoustache from MnSymbolE
\makeatletter
% Declare a math delimiter from the mnomx font, first undefining the
% command if it already exists (e.g. \lmoustache from the LaTeX kernel)
% so \DeclareMathDelimiter does not error out.
\def\Decl@Mn@Delim#1#2#3#4{%
\if\relax\noexpand#1%
\let#1\undefined
\fi
\DeclareMathDelimiter{#1}{#2}{#3}{#4}{#3}{#4}}
\def\Decl@Mn@Open#1#2#3{\Decl@Mn@Delim{#1}{\mathopen}{#2}{#3}}
\def\Decl@Mn@Close#1#2#3{\Decl@Mn@Delim{#1}{\mathclose}{#2}{#3}}
\Decl@Mn@Open{\llangle}{mnomx}{'164}
\Decl@Mn@Close{\rrangle}{mnomx}{'171}
% NOTE(review): the open delimiter uses slot '245 and the close '244 —
% assumed to match the MnSymbolE encoding table; verify against the font.
\Decl@Mn@Open{\lmoustache}{mnomx}{'245}
\Decl@Mn@Close{\rmoustache}{mnomx}{'244}
\makeatother
% Widecheck
\makeatletter
% \widecheck{x}: a wide check accent, built by vertically flipping a
% \widehat of the same width as the argument.
\DeclareRobustCommand\widecheck[1]{{\mathpalette\@widecheck{#1}}}
\def\@widecheck#1#2{%
% box 0: the argument itself, at the current math style #1
\setbox\z@\hbox{\m@th$#1#2$}%
% box 2: a \widehat over an invisible rule with the argument's height
% and width, so only the hat itself has visible ink
\setbox\tw@\hbox{\m@th$#1%
\widehat{%
\vrule\@width\z@\@height\ht\z@
\vrule\@height\z@\@width\wd\z@}$}%
\dp\tw@-\ht\z@
% vertical offset used to position the flipped hat above the argument
\@tempdima\ht\z@ \advance\@tempdima2\ht\tw@ \divide\@tempdima\thr@@
% flip the hat upside down (scalebox 1,-1) to obtain a check shape
\setbox\tw@\hbox{%
\raise\@tempdima\hbox{\scalebox{1}[-1]{\lower\@tempdima\box
\tw@}}}%
% overprint the flipped hat and the argument
{\ooalign{\box\tw@ \cr \box\z@}}}
\makeatother
% \mathraisebox{voffset}[height][depth]{something}
\makeatletter
% Math-mode version of \raisebox: the optional [height] and [depth]
% arguments are forwarded to \raisebox only when actually given, and
% \mathpalette re-typesets the content (#4) at the current math style
% (##1), with \m@th suppressing \mathsurround spacing.
\NewDocumentCommand\mathraisebox{moom}{%
\IfNoValueTF{#2}{\def\@temp##1##2{\raisebox{#1}{$\m@th##1##2$}}}{%
\IfNoValueTF{#3}{\def\@temp##1##2{\raisebox{#1}[#2]{$\m@th##1##2$}}%
}{\def\@temp##1##2{\raisebox{#1}[#2][#3]{$\m@th##1##2$}}}}%
\mathpalette\@temp{#4}}
% Bug fix: this line previously read \makeatletter (a no-op repeat),
% leaving @ category-coded as a letter past the end of this section.
\makeatother
% udots (taken from yhmath)
\makeatletter
% Upward-slanting diagonal dots (mirror image of \ddots): three periods
% raised by 1pt, 4pt and 7pt respectively.
\def\udots{\mathinner{\mkern2mu\raise\p@\hbox{.}
\mkern2mu\raise4\p@\hbox{.}\mkern1mu
\raise7\p@\vbox{\kern7\p@\hbox{.}}\mkern1mu}}
\makeatother
%% Fix array
\newcommand{\itexarray}[1]{\begin{matrix}#1\end{matrix}}
%% \itexnum is a noop
\newcommand{\itexnum}[1]{#1}
%% Renaming existing commands
% The aliases below map itex/MathML-style names onto standard LaTeX
% commands so that itex source compiles unchanged.
\newcommand{\underoverset}[3]{\underset{#1}{\overset{#2}{#3}}}
\newcommand{\widevec}{\overrightarrow}
% Arrow aliases
\newcommand{\darr}{\downarrow}
\newcommand{\nearr}{\nearrow}
\newcommand{\nwarr}{\nwarrow}
\newcommand{\searr}{\searrow}
\newcommand{\swarr}{\swarrow}
\newcommand{\curvearrowbotright}{\curvearrowright}
\newcommand{\uparr}{\uparrow}
\newcommand{\downuparrow}{\updownarrow}
\newcommand{\duparr}{\updownarrow}
\newcommand{\updarr}{\updownarrow}
\newcommand{\gt}{>}
\newcommand{\lt}{<}
\newcommand{\map}{\mapsto}
\newcommand{\embedsin}{\hookrightarrow}
% Capital Greek letters that coincide with Latin letters (LaTeX has no
% commands for these; itex does)
\newcommand{\Alpha}{A}
\newcommand{\Beta}{B}
\newcommand{\Zeta}{Z}
\newcommand{\Eta}{H}
\newcommand{\Iota}{I}
\newcommand{\Kappa}{K}
\newcommand{\Mu}{M}
\newcommand{\Nu}{N}
\newcommand{\Rho}{P}
\newcommand{\Tau}{T}
\newcommand{\Upsi}{\Upsilon}
\newcommand{\omicron}{o}
% Delimiter and big-operator aliases
\newcommand{\lang}{\langle}
\newcommand{\rang}{\rangle}
\newcommand{\Union}{\bigcup}
\newcommand{\Intersection}{\bigcap}
\newcommand{\Oplus}{\bigoplus}
\newcommand{\Otimes}{\bigotimes}
\newcommand{\Wedge}{\bigwedge}
\newcommand{\Vee}{\bigvee}
\newcommand{\coproduct}{\coprod}
\newcommand{\product}{\prod}
\newcommand{\closure}{\overline}
\newcommand{\integral}{\int}
\newcommand{\doubleintegral}{\iint}
\newcommand{\tripleintegral}{\iiint}
\newcommand{\quadrupleintegral}{\iiiint}
\newcommand{\conint}{\oint}
\newcommand{\contourintegral}{\oint}
\newcommand{\infinity}{\infty}
\newcommand{\bottom}{\bot}
\newcommand{\minusb}{\boxminus}
\newcommand{\plusb}{\boxplus}
\newcommand{\timesb}{\boxtimes}
\newcommand{\intersection}{\cap}
\newcommand{\union}{\cup}
\newcommand{\Del}{\nabla}
\newcommand{\odash}{\circleddash}
\newcommand{\negspace}{\!}
\newcommand{\widebar}{\overline}
\newcommand{\textsize}{\normalsize}
% NOTE(review): this redefines the standard text-size command \scriptsize
% as a math-style switch; any ordinary text use of \scriptsize in this
% document would misbehave — intentional for itex compatibility only.
\renewcommand{\scriptsize}{\scriptstyle}
\newcommand{\scriptscriptsize}{\scriptscriptstyle}
\newcommand{\mathfr}{\mathfrak}
% Interactive itex features degrade to just showing their content
\newcommand{\statusline}[2]{#2}
\newcommand{\tooltip}[2]{#2}
\newcommand{\toggle}[2]{#2}
% Theorem Environments
% Numbered and unnumbered (starred, u-prefixed) variants of each.
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{cor}{Corollary}
\newtheorem*{utheorem}{Theorem}
\newtheorem*{ulemma}{Lemma}
\newtheorem*{uprop}{Proposition}
\newtheorem*{ucor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}
\newtheorem{example}{Example}
\newtheorem*{udefn}{Definition}
\newtheorem*{uexample}{Example}
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\newtheorem{note}{Note}
\newtheorem*{uremark}{Remark}
\newtheorem*{unote}{Note}
%-------------------------------------------------------------------
\begin{document}
%-------------------------------------------------------------------
\section*{Blog - Petri net programming (part 5)}
This page is a [[blog article in progress]], written by [[David Tanzer]].
\emph{guest post by \textbf{[[David Tanzer]]}}
Now let’s take a closer look at the movement of the particles. We will assume that the process consists of the aggregation of many independent, separate, low-probability events. This is called a Poisson process.
Let G be a small test region within the container. Imagine a kind of “Geiger counter” that clicks whenever a particle enters G. Let T be a time interval $(t_1, t_2)$. Then in each trial of the experiment (stir of the soup), we can count the number of clicks that occur between $t_1$ and $t_2$. This is a random variable, which we write as A(G,T).
By choosing different values for the interval T, this apparatus gives us a system of random variables A(G,T).
There are three properties of this system of variables that characterize it as a Poisson process: time independence of disjoint intervals, vanishing probability of simultaneous events, and the existence of a well-defined rate parameter.
1) \textbf{Independent increments.} If T and T’ are time intervals that don’t overlap, then the random variables A(G,T) and A(G,T’) are \emph{independent} of each other; knowing how many clicks occurred during T gives us no information about how many clicks can be expected during the interval T’. Here is the precise statement of the independence condition:
Prob(A(G,T) = n and A(G,T’) = m) = Prob(A(G,T) = n) * Prob(A(G,T’) = m).
We did have to stipulate that T and T’ do not overlap. If they do overlap, then A(G,T) and A(G,T’) cannot be independent. For instance, if T is contained in T’, and one thousand clicks are measured during T, then we know that at least 1000 clicks will occur during T’.
For an example of a process where the independence condition doesn't hold true, consider a preparation that produces a soup with moving bubbles of low concentration. Then consider two nearby, non-overlapping intervals T and T’. If, in a trial of the experiment, it was found that A(G,T) was lower than the expected value, then, due to the presence of low-density bubbles, we could predict lower values during the nearby experiment A(G,T’).
2) \textbf{Existence of a well-defined rate parameter.} At every point in time t, we can meaningfully define the instantaneous rate at which the process is running. More specifically, this means that for small time intervals, the probability of there being a click during T is proportional to the duration of T. So if we halve the length of the interval, then we halve the probability of a click occurring.
This assumption allows us to meaningfully define the probability per unit time of an event occurring. To form an estimate of this ``probability rate'', just divide the probability of an event occurring during T, by the length of T: $\mathrm{Prob}(A(G,T) > 0) / \mathrm{length}(T)$. As T gets smaller, this estimate of the instantaneous probability rate gets better and better. The probability rate is defined as this limit:
\[ \mathrm{ProcessRate} = \lim_{\mathrm{length}(T) \to 0} \frac{\mathrm{Prob}(A(G,T) > 0)}{\mathrm{length}(T)}. \]
The process rate may vary with time. If it is constant, then the process is called a homogeneous Poisson process.
3) \textbf{Vanishing probability of simultaneous events.} For a small time interval T, the probability of there being multiple clicks of the counter is vanishingly small.
Here is the idea. Whereas the chance of freak accident is small, the chance of two freak accidents happening at the same time is astronomically smaller. This is an assumption about the independence of the freak accidents, for if accidents A and B generally occur together, then the chance of the two of them happening will actually be \emph{greater} than the chance of just one of them happening.
This condition would be violated if the particles tended to travel in pairs.
We write $P(A(G,T) > 1) = o(\Delta t)$.
Whereas the probability of a single click is also small, it is not \emph{vanishingly} small in comparison with the length of the time interval: in the limit, it decreases linearly with the length of the time interval. The chance of multiple clicks, however, goes to zero much faster than the length of T goes to zero:
\[ \lim_{\mathrm{length}(T) \to 0} \frac{\mathrm{Prob}(A(G,T) > 1)}{\mathrm{length}(T)} = 0. \]
We wish now to analyze the process of the \emph{reactions}, under the assumption that the movements of the particles are Poisson. For a test region G and an interval of time T, we can imagine a ``Geiger counter'' that clicks every time a reaction takes place in G x T.
In the kinetic model called the ``law of mass action,'' the rate of a reaction is proportional to the product of the concentrations of its input species.
We will now proceed to give a proof of the law, which is an approximation that is valid over a range of concentrations, in the chief case of a reaction that takes two input species.
In real reactions, the likelihood of more than two reactants meeting at the same place is far less than that of two of them meeting, so higher-order reactions can effectively be treated as cascades of binary reactions.
For a transition taking a single input, this law just states that the reaction rate is proportional to the concentration of the species, which is clearly true. We will now prove it for a reaction that takes two input species. You can check that the ideas here can be extended to transitions with more inputs. In practice, this binary reaction form has a paramount place, because the likelihood of three or more particles coming together at once is extremely unlikely. So a reaction that is written A + B + C $\rightarrow$ D will in effect be reducible to a cascade of binary reactions, like A + B $\rightarrow$ TempProduct, TempProduct + C $\rightarrow$ D.
Suppose that in order to react, an A particle and a B particle must be within a distance of $\epsilon$ from each other. In the model of the particles as spheres, $\epsilon$ will be the radius of $a$ plus the radius of $b$.
Define an \textbf{epsilon-crossing} as the event that the world-lines for a and b come within a distance of $\epsilon$. Then the probability that they will react is the probability that the world-lines of a and b have an $\epsilon$-crossing, times the conditional probability Q of them reacting, given that they have an $\epsilon$-crossing.
Let's now analyze the process of the $\epsilon$-crossings. This will give us a handle on the rate of the reactions, which will be the rate of $\epsilon$-crossings multiplied by Q.
Let p be the probability that an \emph{individual} A and B particle will have an $\epsilon$-crossing within G x T.
Suppose that the system has NA A particles, and NB B particles.
Let $X_{ij,T}$ be the event that $a_i$ and $b_j$ have an $\epsilon$-crossing within T.
Lemma 1. $P_{dt}(X_{ij}) = \alpha \, dt$, for some constant $\alpha$. To see this, divide the interval of length $dt$ into two halves:
\[ T_1 = (t, t + dt/2), \quad T_2 = (t + dt/2, t + dt). \]
Then $X_{ij,T} = X_{ij,T_1} \vee X_{ij,T_2}$.
Since this is a disjoint union,
\[ P(X_{ij,T}) = 2 P(X_{ij,T_1}), \]
i.e., $P_{dt}(X_{ij}) = 2 P_{dt/2}(X_{ij})$, so that
\[ P_{dt/2}(X_{ij}) = \tfrac{1}{2} P_{dt}(X_{ij}). \]
This proves that $P_{dt}(X_{ij})$ is proportional to $dt$.
QED Lemma 1.
Lemma 2. The $\epsilon$-crossing process satisfies the rare-multiple-event Poisson axiom.
Now there are multiple crossings iff $X_{ij} \wedge X_{kl}$ holds for some $(i,j) \neq (k,l)$.
All such $X_{ij}$ and $X_{kl}$ are independent events.
So the event of multiple crossings is the disjunction of $X_{ij} \wedge X_{kl}$, for $(i,j) \neq (k,l)$.
The probability of a multiple crossing is therefore bounded above by
\[ \sum P(X_{ij} \wedge X_{kl}) = \sum P(X_{ij}) \, P(X_{kl}) = \sum (\alpha \, dt)(\alpha \, dt) = K \alpha^2 \, dt^2 = o(dt). \]
QED Lemma 2.
Now the probability that there is some $\epsilon$-crossing is
\[ P\Bigl(\bigvee X_{ij}\Bigr) = \sum P(X_{ij}) - \sum_{(i,j) \neq (k,l)} P(X_{ij} \wedge X_{kl}) + \cdots = p \, N_A N_B + o(dt). \]
This proves that the process rate is proportional to NA NB.
Now this eps-crossing process satisfies the independent increments condition, and so it is a Poisson process, with rate parameter proportional to NA NB.
Claim 1. $\mathrm{Prob}(\text{reactions} > 1) = o(dt)$.
Proof. There is more than one reaction iff there is more than one crossing \emph{and} some of the crossings reacted, so
\[ \mathrm{Prob}(\text{reactions} > 1) = \mathrm{Prob}(\text{crossings} > 1 \text{ and some crossings reacted}) \le \mathrm{Prob}(\text{crossings} > 1) = o(dt). \]
Prob(a reaction took place) = Prob(a crossing took place) * Prob(a crossing reacts) = Q * Prob(a crossing took place), i.e.,
\[ \mathrm{Prob}(\text{reactions} > 0) = Q \cdot \mathrm{Prob}(\text{crossings} > 0) = \mathrm{Prob}(\text{reactions} = 1) + o(dt) = Q(\alpha \, dt \, N_A N_B + o(dt)) = \beta \, N_A N_B \, dt + o(dt). \]
This proves both the law of mass action, and the well-definedness of the process rate parameter.
Note, however, that the reaction process is not a full-fledged Poisson process, because it does not satisfy the principle of independent increments. Suppose there are 1000 (water) particles in the system (at the start). If, in one run of the experiment, we found that all 1000 dissociated in interval T, then we \emph{know} that in all subsequent intervals, the count must be zero -- and this is a violation of the independence principle.
Nevertheless, it is ``locally Poisson,'' in the sense that as the time interval T gets smaller, it looks more and more like a Poisson process; the subdivisions of T are ``almost independent'' of each other, since the expected changes in concentrations across T become vanishingly small. In the next article, we will see how all these characteristics of the reaction process are used in the analysis to develop a simulation algorithm.
Here is a question to ponder: Can we keep doubling the concentration of one of the reactants, and expect the instantaneous reaction rate to keep doubling along with it?
To begin with, we can't keep doubling the concentration, because the particles have a non-zero diameter, and so at a certain point, the container will become fully packed. This is what happens when a gas becomes compressed to the point where it changes phase to an incompressible liquid, or a solid.
But even within the range of attainable concentrations, the diameters of the particles will cause the reaction rate to increase less than linearly with an increase in the concentrations of one of the reactants. This effect becomes negligible at low concentrations, and becomes pronounced at high concentrations.
A closer examination of our proof will provide insight into this limitation of the law of mass action.
Suppose that there are a fixed number of B particles in the system, and that we double the number of A particles. We are indeed doubling the number of opportunities for an A particle and a B particle to react. But the A particles are “competing” for reactions with the B particles, which dilutes the probability that an eps-crossing between an A and a B will lead to a reaction. The higher the concentration, the greater the “competition,” and the greater the reduction from the value predicted by the law of mass action.
Here is where the issue comes up in the proof we gave using the Poisson process of eps-crossings. But the conditional probability Q that an eps-crossing reacts, is not actually a constant: it decreases as the concentrations increase. If particle a has a simultaneous eps-crossing with particle b1 and b2, then the presence of each b reduces the chances that the other will react with a.
Let Q0 be the conditional probability, where there are no multiple eps-crossings. Then the extent to which the actual Q is less than Q0, depends upon the proportion of multiple crossings out of the whole population of crossings.
Let us now qualitatively analyze the probability that a randomly chosen eps-crossing is a multiple crossing. Let a and b be two particles that participate in the crossing.
Now each of the N particles has to be \emph{somewhere} in the container, which has volume V, at time t. So let $V(\epsilon)$ = the volume of the $\epsilon$-sphere ($\tfrac{4}{3}\pi\epsilon^3$), and further let $V(\epsilon) / V$ = the fraction of the volume occupied by the $\epsilon$-sphere around b. The number of particles whose center lies in this sphere at time t is simply the number of particles $N_A$ times this proportion, i.e. $N_A \cdot V(\epsilon) / V$. Now $N_A / V$ is the concentration per unit volume of A, and its reciprocal $V / N_A$ is the amount of the volume V that is pro-rata distributed to each particle -- call this ParticleVolume(A).
Rewriting this formula, we get that the expected number of ``intruder'' A particles in the sphere, is V(eps) / ParticleVolume(A).
Let ParticleSeparation(A) = CubeRoot(ParticleVolume(A)). Write this as:
\[ K \, \bigl(\epsilon / \mathrm{ParticleSeparation}(A)\bigr)^3. \]
So, if the particle separation is large compared to the particle diameter, e.g. in a rarefied gas, binary crossings predominate, and the multiplicative law is a good approximation. But the closer the spacing gets to $\epsilon$ -- i.e. the closer the system gets to being fully packed -- the more that ratio deviates from zero, and, at the fully packed state, it equals K. This is related to the number of neighbors that a particle can have in three dimensions.
In the case where our system was in a gaseous state, we have just described the case where the gas condenses to a liquid.
Another situation is when the reactants are dissolved in a solution. Then the world-lines are very zig-zaggy, because the particles are constantly colliding with the molecules of the surrounding fluid. Nevertheless, the analysis of $\epsilon$-crossings still holds up, and we reach the same conclusion.
The law of mass action also breaks down at very low concentrations, when there are multiple inputs from the same species to a transition. Consider a transition for the formation of H$_2$: H + H $\rightarrow$ H$_2$.
Suppose there was only \emph{one} H in the system. Then our transition could never fire, but the rate formula would give us a non-zero firing rate:
\[ N_H \cdot N_H = 1. \]
So let's review the proof of the mass action kinetics, to find the source of the discrepancy. There, we said that the number of possible $\epsilon$-crossings was equal to the number of A particles times the number of B particles. This would say that the number of $\epsilon$-crossings is one times one, but in fact there are \emph{zero} possible $\epsilon$-crossings with a single A particle.
More generally, if there are n A particles, the number of possible crossings is $n(n-1)/2$: the number of ways to choose 2 members out of the set of n, i.e., $\binom{n}{2}$. And if the reaction takes k inputs (from A), the value is $\binom{n}{k}$, equal to the number of subsets of size k out of a set of size n:
\[ \binom{n}{k} = \frac{n(n-1)\cdots(n-k+1)}{k!}. \]
Since the k! is a constant, we can absorb it into the rate constant, and state the law as\ldots{}
\vspace{.5em} \hrule \vspace{.5em}
Therefore the product of the concentrations, which drives the transition rate, should have the form $N_H (N_H - 1)$. More generally, if T takes k inputs from A, then the factor for A should be
\[ N (N - 1) \cdots (N - k + 1). \]
This is like $N^k$, but less; it is called the \emph{falling power} of N to the k, and is written $N^{\underline{k}}$. Finally, for the general formula, we multiply all such factors together, with one factor per input species.
Suppose T has input species $A_1, \ldots, A_n$, and the respective input degrees are $d_1, \ldots, d_n$. Then the firing rate for T is proportional to
\[ \mathrm{rateConstant}(T) \cdot N_{A_1}^{\underline{d_1}} \cdots N_{A_n}^{\underline{d_n}}. \]
We've defined a stochastic Petri net as a Petri net with rate constants, and have introduced a kinetics for this model.
Returning to our pragmatic program, the next question that arises is: how do we implement it?
That is the subject of the next blog article, which will explore the algorithmic issues raised by simulating a stochastic Petri net with Poisson-process mass-action kinetics. There we will give an expository presentation of a simulator program, and apply it to the task of experimentally determining the statistical behavior of some specific reaction networks.
The use of mathematics here can be likened to the use of astronomy by (ancient) mariners, to give them one of the key hopes of navigating through the rough seas ahead of them.
Aside from the intrinsic interest and beauty of this substratum of the statistics of everyday reality, there is also a practical payoff.
We shall also gain directly from the theory of Poisson processes, as results of this theory will provide us with a major optimization to the Petri net simulator algorithm.
\begin{itemize}%
\item Prove the propositions 1 and 2
\item We defined the probability rate at a point in time. But if T is scaled too large, it would say that the probability of an event is greater than one. Explain.
\item Can you give an example of a process that doesn't have a well-defined rate parameter?
\item Prove the mass action kinetics for a transition that takes three input species.
\item Prove it in general.
\item Write a simulator program for stochastic Petri nets. As an option, you can use the simulator from the first blog as a point of departure.
\end{itemize}
References: Feres, Network Series, Feinberg.
category: blog
\end{document}