c$.
{Let us define the events
\[
E_m:=\{W_{a_k}(n) \ge c n,\, \forall n\ge m\} \qquad (m\geq 1).
\]
Then, $\bigcup_m E_m$ holds almost surely.}
Let $\tau_n$ be the time
when edge~$a_k$ is reinforced for the $n$-th time
(so by definition $W_{a_k}(\tau_n) = n+1$).
Note that by definition $\tau_n\ge n$ for all $n\ge 1$. Thus, for all $n\ge m$, on $E_m$, using that {$W_{a_{k-1}}(\tau_n)\le \tau_n+1$ and $\tau_n\le (n+1)/c$, one has
\[
\frac{W_{a_k}(\tau_n)}{W_{a_k}(\tau_n) + W_{a_{k-1}}(\tau_n)} \ge \frac{c(n+1)}{c(n+1)+n+1+c} \ge c/2.
\] }
It follows that, almost surely on the event $E_m$, if the weight of $a_{k+1}$ is zero, then,
for all $n\ge m$, the $\tau_n$-th ant makes at least a geometric number
of crossings of edge $a_k$, with success probability $\nu:=1-c/2$,
before jumping across edge $a_{k-1}$.
Consequently, on $E_m$, and for $n\ge m$,
the probability to reinforce edge $a_{k+1}$ at time $\tau_n$
is at least $1- (1-\rho(n))^{X_n}$,
where $(X_n)_{n\ge 0}$ is a sequence of i.i.d.\ geometric random variables with parameter~$\nu$,
and
\[
\rho(n) = \frac{W_{a_{k+1}}(\tau_n)}{W_{a_{k+1}}(\tau_n)+ n+ 1}.
\]
Thus, letting ${u}_{k+1}(n) = W_{a_{k+1}}(\tau_n)/(n+1)$, we find
\ba
\mathbb E[W_{a_{k+1}}(\tau_{n+1}) -W_{a_{k+1}}(\tau_n)\mid \mathcal F_{\tau_n}]
&\geq \mathbb E[1-(1-\rho(n))^{X_n}\mid W_{a_{k+1}}(\tau_n)]\\
&= 1-\frac{\nu(1-\rho(n))}{1-(1-\nu)(1-\rho(n))}\\
&= \frac{\rho(n)}{1- (1-\nu)(1-\rho(n))}
= \frac{{u}_{k+1}(n)}{{u}_{k+1}(n) + \nu},
\ea
with $(\mathcal F_i)_{i\ge 0}$ the natural filtration of the process,
and where we used that $\mathbb E x^{X_n} = \nu x/(1-(1-\nu)x)$ for all $x\in(0,1)$.
It follows that on the event $E_m$, the process $(W_{a_{k+1}}(\tau_n))_{n\ge m}$ stochastically dominates a $G$-urn process (starting from $W_{a_{k+1}}(\tau_m)$),
with $G(x): = \spfrac{x}{x+\nu}$.
Since $G(x)> x$ for all $x\in(0,c/2)$, it follows from Proposition~\ref{lem.urne} that almost surely
$\liminf {u}_{k+1}(n) \ge c/2$. We deduce the induction step, using that by hypothesis, $\limsup \tau_n/n \le 1/c$.
\end{proof}
\Subsection{The stochastic algorithm and
a sequence of decreasing compact subspaces.}\label{subsec.2paths.2}
Recall that we set $\hat \bW(n) := \bW(n)/(n+1)$, for all $n\geq 0$.
Note also that, by~definition of the model, for all $n\geq 0$,
\begin{multline*}
\hat {\bW}(n) \in \mathcal E' \\
:=
\{\bs w \in \mathcal E \colon w_{a_p} = 1 - w_{b_q},\, w_{b_q}\leq w_{b_{q-1}}\leq\cdots \leq w_{b_1},\,
w_{a_p}\leq w_{a_{p-1}}\leq\cdots\leq w_{a_1}\},
\end{multline*}
with $\mathcal E$ as defined in \eqref{def.E}.
Moreover, for all $n\geq 0$, we have
\[
\hat \bW(n+1) = \hat\bW(n) +
\frac1{n+2}\,\bigl(F(\hat\bW(n)) + \xi_{n+1}\bigr),
\]
with $\xi_{n+1}$ some martingale difference and with $F$ as defined in~\eqref{def.F}. More specifically the coordinates of $F$ can be computed explicitly here,
and are given by, for all $k$ and~$\ell$ such that $1\leq k\leq p$ and $1\leq \ell\leq q$, for all $\bs w\in \mathcal E'$,
\begin{equation}\label{eq:Fab}
F_{a_k}(\bs w) = \frac{S^a_k(\bs w)}
{S^a_k(\bs w)+S^b_q(\bs w)}
-w_{a_k}
\quad\text{ and }\quad
F_{b_\ell}(\bs w) = \frac{S^b_\ell(\bs w)}
{S^b_\ell(\bs w)+S^a_p(\bs w)}
-w_{b_\ell},
\end{equation}
where we have defined, for any $\bs w \in [0,1]^E$, $m\in\{a,b\}$,
and $s$ an integer such that $1\leq s\leq p$ if $m = a$, and $1\leq s\leq q$ if $m = b$,
\begin{equation}\label{eq:def_Sq}
S^m_s(\bs w) = \frac1{\sum_{i=1}^s\sfrac1{w_{m_i}}}.
\end{equation}
Note that, for all $k$ such that $1\le k\le p$, $S_k^a(\bs w)=0$ if and only if $w_{a_i}=0$ for some~$i$ such that $1\le i \le k$,
and for all $\ell$ such that $1\le \ell\le q$, $S_\ell^b(\bs w)=0$ if and only if $w_{b_i}=0$ for some $i$ such that $1\le i \le \ell$.
To prove Theorem~\ref{th:two_paths}, we use the ODE method and thus start by studying the solutions of the equation ${\bs{\dot y}} =F({\bs y})$. To do so, we define a sequence $(K_n)_{n\geq 0}$ of decreasing compact subsets of $\mathcal E'$ such that (A) for all $n\geq 0$, $L(\bW)\subseteq K_n$, and (B) the intersection of all these compacts is $\{\bs w^*\}$, where $w_{a_k}^* = \alpha^k$ and $w_{b_\ell}^* = \beta^\ell$, with $(\alpha, \beta)$ as in Theorem~\ref{th:two_paths}.
We prove (A) in Section~\ref{subsec.2paths.3}, and (B) in Section~\ref{subsec.2paths.4}.
In the rest of this section, we define the sequence $(K_n)_{n\geq 0}$ and show that it is decreasing,
\ie that $K_{n+1} \subset K_n$ for all $n\ge 0$.
For this we need some additional notation.
For all $\bs u,\bs v \in \mathcal E'$, we let
\[
H_{a_k}(\bs u,\bs v)
= \frac{S^a_k(\bs u)}{S^a_k(\bs u)+S^b_q(\bs v)}
\quad\text{ and }\quad
H_{b_\ell}(\bs u,\bs v)
= \frac{S^b_\ell(\bs u)}{S^b_\ell(\bs u)+S^a_p(\bs v)},
\]
for all $k$ and $\ell$ such that $1\leq k\leq p$ and $1\leq \ell\leq q$.
Recall further that by Proposition~\ref{prop:multiedges}, there exists a constant $c_{p,q}>0$, such that almost surely
\begin{equation}\label{K0def}
L(\bW) \subseteq \{\bs w\colon w_e\ge c_{p,q}, \text{ for all }e\in E\}.
\end{equation}
{We also define $\bs w^*$ as the limiting vector appearing in the statement of Theorem~\ref{th:two_paths}. More precisely, we have }
\begin{equation}\label{w*}
{w_{a_k}^* = \alpha^k, \quad \text{and} \quad w_{b_\ell}^* = \beta^\ell,}
\end{equation}
{for all $k$ and $\ell$ such that $1\le k\le p$, $1\le \ell \le q$, with $(\alpha,\beta)$ the unique solution of the system~\eqref{eq:system} in $(0,1)^2$ (existence and {uniqueness} of the solution for this system of equations will be proved later, at the end of Section~\ref{subsec.2paths.4}).}
We then define $\bs u^{(0)}$ and~$\bs v^{(0)}$~by
\[
u^{\sss (0)}_{a_k}=u_0^k, \quad
u^{\sss (0)}_{b_\ell}=u_0^\ell,\quad \text{ and }\quad
v^{\sss (0)}_{a_k}=v^{\sss (0)}_{b_\ell}=1,
\]
for all $k$ and $\ell$ such that $1\le k\le p$, $1\le \ell \le q$, with $u_0$ chosen arbitrarily so that
\[
0< u_0<\min(1-\nicefrac1q,\alpha,\beta,c_{p,q}).
\]
The fact that we choose $u_0\le \min(\alpha,\beta)$ entails in particular
\[
0< u^{\sss (0)}_e\leq w^*_e, \quad \text{for all }e\in E.
\]
Next we define inductively two sequences $(\bs u^{\sss (n)})_{n\ge 0}$ and $(\bs v^{\sss (n)})_{n\ge 0}$, by
\[
\bs u^{\sss (n+1)}= H(\bs u^{\sss (n)}, \bs v^{\sss (n)}), \quad \text{and}\quad \bs v^{\sss (n+1)}= H(\bs v^{\sss (n)}, \bs u^{\sss (n)}).
\]
Finally, we define the sequence $(K_n)_{n\ge 0}$ by
\[
K_n:=\{\bs w\in \mathcal E'\colon u^{\sss (n)}_e\le w_e \le v^{\sss (n)}_e, \text{ for all }e\in E\} \quad (\forall n\ge 0).
\]
We prove now that this sequence is decreasing, that is, $K_{n+1}\subset K_n$, for all $n\ge 0$. More precisely, we prove that, for all $n\ge 0$,
\ban
\bs u^{\sss (n)}&__0$, which entails that $h$ is decreasing in a neighbourhood of~$t$ since both its right and left derivatives are negative at this time (it is differentiable if $|u(t)|\neq |v(t)|$).
Symmetrically, the same holds if $u(t)<0\le v(t)$.
Finally when $u(t) = v(t)\neq 0$, we see that $u'(t) = 0$ while
$v'(t)\neq 0$, so that in a neighbourhood of~$t$,
$u(t)\neq v(t)$, and thus by the previous argument $h$ is again decreasing in a neighbourhood of~$t$.
As a consequence, $h$ is decreasing up to the (possibly infinite) time when it reaches~$0$, and thus converges. Note also that the previous arguments show that~$h$ has a negative right-derivative at any non-zero value, which implies that its only possible limit is zero.
This indeed implies that $\bs \Phi(t)$ converges to the set $\mathcal H:= \mathcal E'\cap \{w_2={\spfrac{w_1}{w_1+w_4}}=\nicefrac12\}$, as claimed.
We now prove that $\bs \Phi(t)$ converges to the set $\mathcal H':=\mathcal H \cap \{w_3=1/2\}$.
Indeed, observe that for any $\bs w\in \mathcal H$,
\[
F_3(\bs w) = \frac{w_3}{w_3+\nicefrac12} - w_3,
\]
thus $F_3(\bs w) >0$, if $w_3<1/2$, and $F_3(\bs w)<0$ if $w_3>1/2$.
Since $F_3$ is continuous and $\mathcal E$ is compact, $F_3$ is also positive in a neighbourhood of $\mathcal H \cap \{w_3\le 1/2 - \varepsilon\}$,
and negative in a neighbourhood of $\mathcal H \cap \{w_3\ge 1/2 + \varepsilon\}$, for any fixed $\varepsilon >0$.
Since we also know that $\bs \Phi(t)$ converges to $\mathcal H$, it follows that it converges to
$\mathcal H \cap \{1/2-\varepsilon \le w_3\le 1/2+\varepsilon\}$, for any $\varepsilon>0$. In other words
it converges well to $\mathcal H'$, proving the claim.
Finally, note that for any $\bs w\in \mathcal H'$, one has
\[
F_1(\bs w) =
\frac12+\frac12\cdot\frac{\frac{w_1}{1+w_1}\Bigl(\frac12+\frac{\nicefrac12}{1+w_1}\Bigr)}
{1-\frac12\frac{w_1}{1+w_1}-\frac{\nicefrac14}{(1+w_1)^2}}-w_1
=\frac12+\frac{w_1(2+w_1)}{2w_1^2+6w_1+3}-w_1.
\]
Then one can check that $F_1(\bs w)>0$ if and only if
$f(w_1) >0$ where, for all $x\in \mathbb R$,
\[
f(x)= -2x^3-4x^2+2x+\frac32.
\]
Note that $f$ is a polynomial of degree~3, it thus has at most three zeros in $\mathbb R$.
One can check that $f'$ is positive on $((-2-\sqrt{7})/3,(-2+\sqrt{7})/3)$ and non-positive on the complement of this set. Thus, on $[0,1]$, $f$ is non-decreasing on $[0, (-2+\sqrt{7})/3]$ and non-increasing on $[(-2+\sqrt{7})/3, 1]$. Since $f(0)= \nicefrac32>0$ and $f(1)=-\nicefrac52$, we get that there exists a unique solution to $f(x) = 0$ on $[0,1]$, which we call $w^*$.
Moreover, $f(x)>0$ for all $x\in [0, w^*)$ and $f(x)<0$ for all $x\in (w^*, 1]$. The conclusion follows, using again continuity of $F_1$ and compactness of $\mathcal H'$, as above.
\end{proof}
The proof of Proposition \ref{prop:lozenge} now follows from Corollary~\ref{cor:pemantle}. Indeed, by Lemma~\ref{lem:liminf_lozenge},
the limiting set $L(\bs {\hat W})$ of the stochastic approximation $(\bs {\hat W}(n))_{n\ge 0}$ is contained in the set $\mathcal U$, which was defined in Lemma~\ref{lem:conv.lozenge}.
Then Lemma~\ref{lem:conv.lozenge} and Corollary~\ref{cor:pemantle} imply that $L(\bs {\hat W})= \{(w^*, \nicefrac12, \nicefrac12, w^*, \nicefrac12)\}$, as wanted.
\appendix
\section*{Appendix. Calculating \texorpdfstring{$F$}{F} in the lozenge case: proof of \texorpdfstring{\eqref{eq:formule_lozenge}}{eqref}}\refstepcounter{section}
\label{sec:app}
We use the same notation as in Section~\ref{sec:lozenge}.
To prove~\eqref{eq:formule_lozenge}, we use the {electrical networks} method (see, e.g.~\cite{LL}).
We start by calculating $p_2(\bs w)$: this is the probability that a random walker on the graph with weights $\bs w = (w_i)_{1\leq i\leq 5}$, starting from~$N$, crosses edge~2 before crossing edge~5.
This is equal to the probability that a walker starting from $N$ reaches $F_2$ before $F_5$ on the weighted graph of Figure~\ref{fig:p2}.
We~decompose $p_2(\bs w)$ according to the first step of the walker:
\begin{equation}\label{eq:p2}
p_2(\bs w) = \frac{w_1}{w_1+w_4}\cdot p_{22}(\bs w)+\frac{w_4}{w_1+w_4}\cdot p_{25}(\bs w),
\end{equation}
where $p_{22}(\bs w)$ (\resp $p_{25}(\bs w)$) denotes the probability to reach $F_2$ before $F_5$ starting from $P_2$ (\resp $P_5$) on the graph of Figure~\ref{fig:p2}.
By classical formulas for random walks on weighted graphs (see, e.g.~\cite{LL}),
\[
p_{22}(\bs w) = \frac{\mathcal C_{P_2F_2}(\bs w)}{\mathcal C_{P_2F_2}(\bs w)+\mathcal C_{P_2F_5}(\bs w)},
\]
where $\mathcal C_{XY}(\bs w)$ is the effective conductance between vertices $X$ and $Y$ when the edge weights are given by~$\bs w$. By definition of effective conductances, the effective conductance of a single edge is its weight. Thus, $\mathcal C_{P_2F_2} = w_2$.
Moreover, the conductance of two edges in parallel is the sum of their effective conductances, the effective conductance of two edges in series is the inverse of the sum of the inverses of their effective conductances.
\begin{figure}[htb]
\vspace*{-.5\baselineskip}
\begin{center}
\includegraphics[scale=.65]{p2}
\end{center}
\caption{Notation for the proof of \eqref{eq:formule_lozenge}, and calculation of $p_{22}(w)$, the probability of reaching $F_2$ before $F_5$ starting from $P_2$.}\vspace*{-.5\baselineskip}
\label{fig:p2}
\end{figure}
Using these formulas, we get (see Figure~\ref{fig:p2} for details)
\[
\mathcal C_{P_2F_2}(\bs w)
= \frac{\big(w_3+\frac{w_1w_4}{w_1+w_4}\big)w_5}{w_3+w_5+\frac{w_1w_4}{w_1+w_4}}.
\]
We thus get
\begin{align*}
p_{22}(\bs w)
&= \frac{w_2}{w_2 + \frac{\big(w_3+\frac{w_1w_4}{w_1+w_4}\big)w_5}{w_3+w_5+\frac{w_1w_4}{w_1+w_4}}}
= \frac{w_2\big(w_3+w_5+\frac{w_1w_4}{w_1+w_4}\big)}{w_2\big(w_3+w_5+\frac{w_1w_4}{w_1+w_4}\big) + \big(w_3+\frac{w_1w_4}{w_1+w_4}\big)w_5}\\
&= \frac{w_2\big(w_3+w_5+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}},
\end{align*}
because $w_2 +w_5=1$ for all $\bs w\in\mathcal E'$.
By symmetry,
\begin{align*}
p_{25}(\bs w)= 1- \frac{w_5\big(w_2+w_3+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}}
&= \frac{(1-w_5)\big(w_3+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}}\\
&= \frac{w_2\big(w_3+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}}.
\end{align*}
Thus,~\eqref{eq:p2} becomes
\ba
p_2(\bs w)
&= \frac{w_1}{w_1+w_4}\cdot \frac{w_2\big(w_3+w_5+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}}
+\frac{w_4}{w_1+w_4}\cdot\frac{w_2\big(w_3+\frac{w_1w_4}{w_1+w_4}\big)}{w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}}\\
&= \frac{w_2(w_1(w_3+w_4+w_5)+w_3w_4)}{(w_1+w_4)\big(w_3+w_2w_5+\frac{w_1w_4}{w_1+w_4}\big)},
\ea
as claimed.
We now calculate $p_3(\bs w)$: we decompose on the first step of the random walker to~get
\begin{equation}\label{eq:p3}
p_3(\bs w)
= \frac{w_1}{w_1+w_4}\cdot p_{32}(\bs w) + \frac{w_4}{w_1+w_4}\cdot p_{35}(\bs w),
\end{equation}
where $p_{32}(\bs w)$ (\resp $p_{35}(\bs w)$) is the probability to cross edge~$3$ before reaching $F$ starting from $P_2$ (\resp $P_5$), when the edge weights are given by $\bs w$.
Decomposing over the first step of a random walker starting at $P_2$, we get
\[
p_{32}(\bs w) = \frac{w_1}{w_1+w_2+w_3}\cdot p_3(\bs w) + \frac{w_3}{w_1+w_2+w_3},
\]
and similarly for $p_{35}(\bs w)$.
Using this in~\eqref{eq:p3}, we get
\begin{multline*}
p_3(\bs w)
= \frac{w_1}{w_1+w_4}\Bigl(\frac{w_1}{w_1+w_2+w_3}\cdot p_3(\bs w) + \frac{w_3}{w_1+w_2+w_3} \Bigr)\\
+\frac{w_4}{w_1+w_4} \Bigl(\frac{w_4}{w_3+w_4+w_5}\cdot p_3(\bs w) + \frac{w_3}{w_3+w_4+w_5} \Bigr),
\end{multline*}
which implies
\begin{multline*}
\Bigl(1-\frac{w_1^2}{(w_1+w_4)(w_1+w_2+w_3)}-\frac{w_4^2}{(w_1+w_4)(w_3+w_4+w_5)} \Bigr)p_3(\bs w)\\
= \frac{w_3}{w_1+w_4} \Bigl(\frac{w_1}{w_1+w_2+w_3}+\frac{w_4}{w_3+w_4+w_5} \Bigr).
\end{multline*}
This indeed gives the formula for $p_3(\bs w)$ announced in~\eqref{eq:formule_lozenge}.
Finally, we show how to calculate $p_1(\bs w)$: again, we decompose according to the first step of the walker:
\begin{equation}\label{eq:p1_un}
p_1(\bs w)
= \frac{w_1}{w_1+w_4} + \frac{w_4}{w_1+w_4}\cdot p_{15}(\bs w),
\end{equation}
where $p_{15}(\bs w)$ is the probability to cross edge~1 before reaching $F$ starting from $P_5$.
Decomposing according to the first step again, we get
\ban
p_{15}(\bs w)
&= \frac{w_4}{w_3+w_4+w_5}\cdot p_1(\bs w) + \frac{w_3}{w_3+w_4+w_5}\cdot p_{12}(\bs w)\notag\\
&= \frac{w_4}{w_3+w_4+w_5} \Bigl( \frac{w_1}{w_1+w_4} + \frac{w_4}{w_1+w_4}\cdot p_{15}(\bs w) \Bigr)+ \frac{w_3}{w_3+w_4+w_5}\cdot p_{12}(\bs w),\label{eq:p1_bis}
\ean
where $p_{12}(\bs w)$ is the probability to cross edge~1 before reaching $F$ starting from $P_2$.
We have used~\eqref{eq:p1_un} in the second equality.
Finally, we have
\begin{equation}\label{eq:p1_ter}
p_{12}(\bs w)= \frac{w_1}{w_1+w_2+w_3} + \frac{w_3}{w_1+w_2+w_3}\cdot p_{15}(\bs w).
\end{equation}
Using~\eqref{eq:p1_ter} in~\eqref{eq:p1_bis} we get
\[
p_{15}(\bs w)
= \frac{\frac{w_1}{w_1+w_4}\cdot\frac{w_4}{w_3+w_4+w_5}+ \frac{w_1}{w_1+w_2+w_3}\cdot\frac{w_3}{w_3+w_4+w_5}}
{1-\frac{w_4^2}{(w_1+w_4)(w_3+w_4+w_5)}-\frac{w_3^2}{(w_1+w_2+w_3)(w_3+w_4+w_5)}},
\]
and we can then use this in~\eqref{eq:p1_un} to get the formula for $p_1(\bs w)$ announced in~\eqref{eq:formule_lozenge}.
\backmatter
\bibliographystyle{jepalpha+eid}
\bibliography{kious-et-al}
\end{document}
__