\documentclass[10pt]{article}
\usepackage{graphicx}
\newcommand{\rmath}{\ensuremath{\mathbb{R}}}
\newcommand{\cmath}{\ensuremath{\mathbb{C}}}
\newcommand{\nmath}{\ensuremath{\mathbb{N}}}
\usepackage{slashbox}
\usepackage{amssymb}
\usepackage{listings}
\lstset{language=Matlab} 
\usepackage{longtable}
\usepackage{multirow}
\newtheorem{exe}{Example}
\newtheorem{rem}{Remark}
\newcommand{\kmath}{\ensuremath{\mathbb{K}}}
\newcommand{\norme}[1]{\ensuremath{\parallel\! #1\! \parallel}}
\newcommand{\scal}[1]{\ensuremath{\langle #1 \rangle}}
\usepackage[top=2cm, bottom=2 cm, left=2cm, right=2cm]{geometry}
\setlength{\parskip}{1pt}
\newcounter{savefootnote}% Save footnote counter
\usepackage{amsfonts}
\usepackage{amsmath}
\newtheorem{theorem}{Theorem}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{case}[theorem]{Case}
\newtheorem{proof}{Proof}
\renewcommand{\figurename}{Fig.}
\begin{document}
\title{Bivariate spectral approximation.}
\author{Ismahene Sehili  \quad Abdelhamid Zerroug 
\thanks{%
Department of Mathematics, University of Biskra, Algeria,
E-mail: sehilimath@gmail.com.
\newline Department of Mathematics, University of Biskra, Algeria, E-mail:
zerroug60@gmail.com.
}}

\date{}






%\date{\today}
\maketitle
\begin{abstract}
In this paper, we propose a two-dimensional polynomial basis that extends Legendre series approximation to bivariate functions. The method is tested on functions with different degrees of regularity. We also present a theoretical study of the stability of the Tau spectral method in this two-dimensional basis, together with an error estimate.
 
\end{abstract}

\noindent \textbf{Keywords: Two-dimensional Legendre basis, Rodrigues construction, Error estimation, Stability.}
%\noindent\textbf{AMS Classification :}
\section{Introduction.}
Legendre series expansion
\begin{equation}\label{AA}
u(x)= \sum_{i=0}^{\infty} \hat{u}_i P_i(x), \quad x \in [-1,1],
\end{equation}
$$\hat{u}_i = \frac{2i+1}{2}\, \scal{u, P_i}, \quad \scal{f,g}= \int_{-1}^{1} f(x)\cdot g(x)\;dx,$$
is a useful and popular tool for the approximation of sufficiently regular univariate functions (in (\ref{AA}), $P_i(x)$ denotes, as usual, the $i$-th degree Legendre polynomial). In fact, the partial sums of Legendre series provide asymptotically optimal polynomial approximations, which can be constructed very efficiently; see the classical reference \cite{14}. It is also worth mentioning here the ``CHEBFUN'' object-oriented Matlab system \cite{7}-\cite{13}, which has recently been developed in order to extend basic Matlab functions to the continuous context via polynomial approximation.
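
As a concrete illustration of $(\ref{AA})$, the following Matlab sketch computes the first $N+1$ Legendre coefficients of a univariate function by Gauss--Legendre quadrature (the test function $u$, the truncation order $N$ and the quadrature size $n$ are illustrative choices, not taken from the text):
\begin{lstlisting}
% Truncated Legendre expansion of u on [-1,1].
% Gauss-Legendre nodes/weights via the Golub-Welsch eigenvalue method.
u = @(x) exp(x); N = 10; n = 32;            % illustrative choices
beta = (1:n-1)./sqrt(4*(1:n-1).^2 - 1);     % Jacobi recurrence coefficients
[V,D] = eig(diag(beta,1) + diag(beta,-1));
[x,idx] = sort(diag(D)); w = 2*V(1,idx).^2; % nodes (column), weights (row)
P = ones(n,N+1); P(:,2) = x;                % three-term recurrence for P_i
for i = 1:N-1
    P(:,i+2) = ((2*i+1)*x.*P(:,i+1) - i*P(:,i)) / (i+1);
end
uhat = ((2*(0:N)+1)/2) .* ((w.*u(x).') * P);  % (2i+1)/2 * <u,P_i>
\end{lstlisting}
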
On the other hand, concerning the extension of polynomial approximation to bivariate functions, only some scattered results are available (see, e.g., \cite{1}). In particular, bivariate Legendre approximation that takes into account the different behaviors of the underlying function in different parts of the domain does not yet seem to be available.


One reason for the importance of polynomials is that they uniformly approximate continuous functions. By this we mean that, given any function defined and continuous on a closed and bounded interval, there exists a polynomial that is arbitrarily close to the given function. This result is expressed precisely in the Weierstrass approximation theorem \cite{12}.


In \cite{1}-\cite{9}, the authors expressed the solution function in terms of the 2D Legendre polynomials produced by a tensor product. The approximate solution
is
\begin{equation}
U_N (x,y)  = \sum_{i=1}^{N}\sum_{j=1}^{N} \hat{U}_{i,j} P_{i}(x)P_{j}(y)   
\end{equation}
With the advantage of a two-dimensional polynomial basis, the solution can instead be expanded in two-dimensional basis functions:

\begin{equation}
U_N (x,y)  = \sum_{i=1}^{N}\sum_{j=1}^{N-i} \hat{U}_{i,j} P_{i,j}(x,y)
\end{equation}
%  to have good approximations for solutions of multi-dimensional problems.



%%Two-dimensional approximation for the functions of two variables is an extension of one-dimensional approximation. Generally, the key idea is to perform one-dimensional approximation first in one variable, and then again in the other variable. Although each step is one-dimensional.




%The approximation of a bivariate function $f$ (known or not) by a polynomial
%is a natural process encountered in various analysis contexts, where $f$ is
%quite regular. In general, it is possible to analyze the local behavior, but
%also in some cases, we can describe the function as an infinite sum.
%
%In both situations the precision with which we can approximate $f$ by a
%polynomial depends on the regularity of the function. The Stone-Weierstrass
%theorem  assures us that every continuous function can be uniformly
%approaching on a compact domain.
In the following subsections we describe the Rodrigues construction of the
$2$D Legendre basis on the square $[-1,1]^2$, discuss its practical implementation,
and present some numerical tests.

In Section \ref{sec}, we present the general principle of the Tau spectral method in an abstract framework, and we provide a theoretical study of the stability of the proposed method together with an estimate of the error it commits.
%
% we extend the Legendre approximation method to somme bivariate
%functions, we discuss its practical implementation, 
%and we give several numerical examples
%concerning functions with different degree of regularity.
\section{Construction and properties.}
\subsection{Rodrigues construction.}\label{1RT}
Two-dimensional Legendre polynomials may be expressed using the following
generalization of the Rodrigues formula:
\begin{equation*}
P_{i,j} (x,y) = \displaystyle \frac{1}{2^{i+j}\, i! \, j ! } \displaystyle 
\frac{\partial^i}{\partial x^i} (x^2-1)^i \;\displaystyle \frac{\partial^j}{%
\partial y^j} (y^2-1)^j
\end{equation*}
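
A small symbolic sketch of this construction (assuming the Matlab Symbolic Math Toolbox; the degrees $i=2$, $j=3$ are arbitrary illustrative choices):
\begin{lstlisting}
% Build P_{i,j} from the generalized Rodrigues formula and spot-check it.
syms x y
i = 2; j = 3;
Pij = expand( diff((x^2-1)^i, x, i) * diff((y^2-1)^j, y, j) ...
              / (2^(i+j) * factorial(i) * factorial(j)) );
% Pij equals P_2(x)*P_3(y) = (3*x^2-1)/2 * (5*y^3-3*y)/2
subs(Pij, [x y], [1 1])       % returns 1, i.e. P_{i,j}(1,1) = 1
\end{lstlisting}
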

\begin{proof}
Two-dimensional Legendre polynomials are solutions of the following PDE:
\begin{equation}  \label{a}
(1-x^2) \frac{\partial^{2} U}{\partial x^{2}} + (1-y^2) \frac{\partial^{2} U}{\partial y^{2}} - 2 x \frac{\partial U}{\partial x} - 2 y \frac{\partial U}{\partial y}+[i(i+1)+ j(j+1)]\, U =0.
\end{equation}
Let $v_1= (x^2-1)^i$ and $v_2 = (y^2-1)^j$, then 


\begin{equation}\label{ZZ}
(1-x^2)\displaystyle \frac{\partial }{\partial x}v_1 + 2\,i\, v_1\, x = 0 
\end{equation}
\begin{equation}\label{BB}
(1-y^2)\displaystyle \frac{\partial }{\partial y}v_2 + 2\,j\, v_2 \,y = 0
\end{equation}

Differentiating $(\ref{ZZ})$  $i+1$ times and $(\ref{BB})$ $j+1$ times using
Leibniz formula, we get after simplification 

\begin{eqnarray}  \label{CC}
(1-x^2) \frac{\partial^{i+2} }{\partial x^{i+2}}v_1 - 2 x \frac{\partial^{i+1} }{\partial x^{i+1}}v_1 + i(i+1) \frac{\partial^{i} }{\partial x^{i}}v_1 &=& 0  \notag \\
(1-y^2) \frac{\partial^{j+2} }{\partial y^{j+2}}v_2 - 2 y \frac{\partial^{j+1} }{\partial y^{j+1}}v_2 + j(j+1) \frac{\partial^{j} }{\partial y^{j}}v_2 &=& 0
\end{eqnarray}
Let $u_1 = \displaystyle \frac{\partial^{i}}{\partial x^{i}} (x^2-1)^i = %
\displaystyle \frac{\partial^{i} }{\partial x^{i}}v_1$ and $u_2 = %
\displaystyle \frac{\partial^{j}}{\partial y^{j}} (y^2-1)^j = 
\displaystyle 
\frac{\partial^{j} }{\partial y^{j}}v_2$ \newline


Multiplying the first equation of $(\ref{CC})$ by $u_2$, the second by $u_1$, and summing, we see that $U= u_1\times u_2$ satisfies $(\ref{a})$. Since the only polynomial solutions of the two Legendre equations in $(\ref{CC})$ are, up to constant factors, $P_i(x)$ and $P_j(y)$, we have
\begin{equation*}
u_1= c_1 P_{i}(x)\qquad \text{and} \qquad u_2= c_2 P_{j}(y),
\end{equation*}
\begin{equation}  \label{p}
U = u_1 \times u_2 = C P_{i,j}(x,y),
\end{equation}
that is,
\begin{equation*}
\frac{\partial^{i} }{\partial x^{i}} (x^2-1)^i = c_1 P_{i}(x) \qquad \text{and} \qquad \frac{\partial^{j} }{\partial y^{j}} (y^2-1)^j = c_2 P_{j}(y).
\end{equation*}
Then, by the Leibniz formula,
\begin{eqnarray*}
c_1 P_{i}(x) &=& \frac{\partial^{i}}{\partial x^{i}} [(x-1)^i\, (x+1)^i ] \\
&=& \sum_{m=0}^{i} \binom{i}{m}\, \frac{\partial^{m}}{\partial x^{m}}(x-1)^i \;\frac{\partial^{i-m}}{\partial x^{i-m}}(x+1)^i \\
&=& (x+1)^i\, \frac{\partial^{i}}{\partial x^{i}}(x-1)^i + \; \text{terms containing a power of }\; (x-1) \\
&=& (i\,!)\, (x+1)^i + \; \text{terms containing a power of }\; (x-1),
\end{eqnarray*}
since every term with $m<i$ retains a factor $(x-1)^{i-m}$.
Similarly,
\begin{eqnarray*}
c_2 P_{j}(y) &=& \frac{\partial^{j}}{\partial y^{j}} [(y-1)^j\, (y+1)^j ] \\
&=& (j\,!)\, (y+1)^j + \; \text{terms containing a power of }\; (y-1).
\end{eqnarray*}
Evaluating at $(x,y)=(1,1)$, where $P_{i}(1)=P_{j}(1)=1$, we get
\begin{equation*}
U(1,1)= u_1(1)\times u_2(1) = c_1\,c_2\, P_{i}(1)\, P_{j}(1) = C = (i\,!)\, 2^i\,(j\,!)\, 2^j.
\end{equation*}
Substituting this value into $(\ref{p})$, we get
\begin{equation*}
P_{i,j} (x,y) = \displaystyle \frac{1}{2^{i+j}\, i! \, j ! } \displaystyle 
\frac{\partial^i}{\partial x^i} (x^2-1)^i \;\displaystyle \frac{\partial^j}{%
\partial y^j} (y^2-1)^j
\end{equation*}
\end{proof} 
We can easily prove the orthogonality of the two-dimensional Legendre
polynomials, i.e., that \cite{11}
\begin{equation*}
\langle P_{i,j}, P_{l,k} \rangle = 0 \qquad \text{for} \qquad (i,j)\neq
(l,k).
\end{equation*}
The Legendre polynomial basis has several advantages. As we have shown above, the elements of the two-dimensional Legendre basis are easy to construct, either by the recursion relation \cite{15} or by the Rodrigues formula. On the other hand, polynomial approximation in this basis is easily implemented with a simple code on any computer.


It is important to note that the weight function $\omega(x,y)$ of the inner product is identically equal to one. This makes projection methods easier and faster to use, unlike many orthogonal polynomial bases whose weight functions require considerable computation time.
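
A minimal sketch of this computation (the test function $U$, the order $N$ and the quadrature size $n$ are illustrative choices): since $\omega \equiv 1$, the coefficients reduce to plain double integrals, $\hat{U}_{i,j} = \frac{(2i+1)(2j+1)}{4}\scal{U, P_i(x)P_j(y)}$, which tensor Gauss--Legendre quadrature evaluates directly.
\begin{lstlisting}
% 2D Legendre coefficients by tensor Gauss-Legendre quadrature.
U = @(x,y) cos(x+y); N = 8; n = 24;          % illustrative choices
beta = (1:n-1)./sqrt(4*(1:n-1).^2 - 1);
[V,D] = eig(diag(beta,1) + diag(beta,-1));   % Golub-Welsch
[x,idx] = sort(diag(D)); w = 2*V(1,idx).^2;
P = ones(n,N+1); P(:,2) = x;                 % P(:,k+1) = P_k at the nodes
for k = 1:N-1
    P(:,k+2) = ((2*k+1)*x.*P(:,k+1) - k*P(:,k)) / (k+1);
end
[X,Y] = ndgrid(x,x);
G = P' * diag(w) * U(X,Y) * diag(w) * P;     % <U, P_i(x) P_j(y)>
Uhat = ((2*(0:N).'+1) * (2*(0:N)+1) / 4) .* G;
\end{lstlisting}
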

\subsection{Differentiation.}\label{cons}
Differentiation consists in computing the Legendre expansion of the derivative of a function expressed as a combination of two-dimensional Legendre polynomials. If
\begin{equation*}
U_N(X,t)= \sum_{i=0}^{N} \sum_{j=0}^{N-i} \hat{U}_{i,j} (t) P_{i,j}(x,y),
\quad X \in \mathbb{R}^n \times \mathbb{R}^n.
\end{equation*}
then, by the recurrence relations
\begin{equation*}
\left\lbrace 
\begin{array}{ccc}
(2i+1) P_{i,j}(x,y) & = & \displaystyle \frac{\partial}{ \partial x}
P_{i+1,j} (x,y)-\displaystyle\frac{\partial}{ \partial x} P_{i-1,j} (x,y) \\  
(2j+1) P_{i,j}(x,y) & = & \displaystyle \frac{\partial}{ \partial y}
P_{i,j+1} (x,y)-\displaystyle \frac{\partial}{ \partial y} P_{i,j-1} (x,y),%
\end{array}
\right.
\end{equation*}
\begin{equation*}
\left\lbrace 
\begin{array}{ccc}
\displaystyle \sum_{j=0}^{\infty} \sum_{i=0}^{\infty} \hat{U}_{i,j}^{(1)}
P_{i,j}(x,y) & = & \displaystyle \sum_{j=0}^{\infty} \sum_{i=0}^{\infty} 
\frac{\hat{U}_{i,j}^{(1)}}{2i+1} \frac{\partial}{\partial x} P_{i+1,j} (x,y)-%
\displaystyle \sum_{j=0}^{\infty} \sum_{i=0}^{\infty} \frac{\hat{U}%
_{i,j}^{(1)}}{2i+1} \frac{\partial}{\partial x} P_{i-1,j} (x,y) \\ 
\displaystyle \sum_{i=0}^{\infty} \sum_{j=0}^{\infty} \hat{U}_{i,j}^{(1)}
P_{i,j}(x,y) & = & \displaystyle \sum_{i=0}^{\infty} \sum_{j=0}^{\infty} 
\frac{\hat{U}_{i,j}^{(1)}}{2j+1} \frac{\partial}{\partial y} P_{i,j+1} (x,y)-%
\displaystyle \sum_{j=0}^{\infty} \sum_{i=0}^{\infty} \frac{\hat{U}%
_{i,j}^{(1)}}{2j+1} \frac{\partial}{\partial y} P_{i,j-1} (x,y)%
\end{array}
\right.
\end{equation*}
\begin{equation*}
\left\lbrace 
\begin{array}{ccc}
\displaystyle \frac{\partial U}{\partial x} (x,y) & = & \displaystyle %
\sum_{j=0}^{\infty} \sum_{i=1}^{\infty} \frac{\hat{U}_{i-1,j}^{(1)}}{2i-1} 
\frac{\partial}{\partial x} P_{i,j} (x,y)-\displaystyle \sum_{j=0}^{\infty}
\sum_{i=-1}^{\infty} \frac{\hat{U}_{i+1,j}^{(1)}}{2i+3} \frac{\partial}{%
\partial x} P_{i,j} (x,y) \\ 
\displaystyle \displaystyle \frac{\partial U}{\partial y} (x,y) & = & %
\displaystyle \sum_{i=0}^{\infty} \sum_{j=1}^{\infty} \frac{\hat{U}%
_{i,j-1}^{(1)}}{2j-1} \frac{\partial}{\partial y} P_{i,j} (x,y)-%
\displaystyle \sum_{j=-1}^{\infty} \sum_{i=0}^{\infty} \frac{\hat{U}%
_{i,j+1}^{(1)}}{2j+3} \frac{\partial}{\partial y} P_{i,j} (x,y)%
\end{array}
\right.
\end{equation*}
\begin{equation*}
\left\lbrace 
\begin{array}{ccc}
\displaystyle \frac{\partial U}{\partial x} (x,y) & = & \displaystyle %
\sum_{j=0}^{\infty} \sum_{i=1}^{\infty} \left[ \displaystyle \frac{\hat{U}%
^{(1)}_{i-1,j}}{2i-1} - \frac{\hat{U}^{(1)}_{i+1,j}}{2i+3} \right]%
\displaystyle\frac{\partial}{\partial x} P_{i,j}(x,y) \\ 
\displaystyle \frac{\partial U}{\partial y} (x,y) & = & \displaystyle %
\sum_{i=0}^{\infty} \sum_{j=1}^{\infty} \left[ \displaystyle \frac{\hat{U}%
^{(1)}_{i,j-1}}{2j-1} - \displaystyle \frac{\hat{U}^{(1)}_{i,j+1}}{2j+3}%
\right] \frac{\partial}{\partial y} P_{i,j}(x,y).%
\end{array}
\right.
\end{equation*}
On the other hand, we have 
\begin{equation*}
\left\lbrace 
\begin{array}{ccc}
\displaystyle \frac{\partial U}{\partial x} (x,y) & = & \displaystyle %
\sum_{j=0}^{\infty} \sum_{i=0}^{\infty} \hat{U}_{i,j} \displaystyle \frac{%
\partial}{\partial x} P_{i,j}(x,y), \\ 
\displaystyle \frac{\partial U}{\partial y} (x,y) & = & \displaystyle
\sum_{j=0}^{\infty} \sum_{i=0}^{\infty} \hat{U}_{i,j} \frac{\partial}{\partial y} P_{i,j}(x,y),%
\end{array}
\right.
\end{equation*}
and since the partial derivatives of $P_{i,j}$ are linearly independent 
\begin{equation*}
\hat{U}_{i,j}= \left\lbrace 
\begin{array}{ccc}
\displaystyle \frac{\hat{U}^{(1)}_{i-1,j}}{2i-1} - \frac{\hat{U}%
^{(1)}_{i+1,j}} {2i+3} \quad i \geq 1\qquad\qquad \text{(for the development
of}\;\; \displaystyle \frac{\partial U}{\partial x}) &  &  \\ 
\displaystyle \frac{\hat{U}^{(1)}_{i,j-1}} {2j-1} - \frac{\hat{U}%
^{(1)}_{i,j+1}} {2j+3} \quad j \geq 1 \qquad\qquad \text{(for the
development of}\;\;\displaystyle \frac{\partial U}{\partial y}) . &  & 
\end{array}
\right.
\end{equation*}
Inverting these relations, we obtain
\begin{equation*}
\hat{U}_{i,j}^{(1)}= \left\lbrace
\begin{array}{ccc}
(2i+1)\displaystyle \sum_{\substack{p=i+1 \\ p+i \ \text{odd}}}^{\infty} \hat{U}_{p,j} , \quad i, j \geq 0, &  &  \\
(2j+1)\displaystyle \sum_{\substack{p=j+1 \\ p+j \ \text{odd}}}^{\infty} \hat{U}_{i,p} , \quad i, j \geq 0. &  &
\end{array}
\right.
\end{equation*}

The previous identity generalizes, with obvious notation, to
\begin{equation*}
\hat{U}_{i,j}^{(q-1)}= \left\lbrace 
\begin{array}{ccc}
\displaystyle \frac{\hat{U}^{(q)}_{i-1,j}}{2i-1} - \frac{\hat{U}%
^{(q)}_{i+1,j}} {2i+3} \quad i \geq 1, &  &  \\ 
\displaystyle \frac{\hat{U}^{(q)}_{i,j-1}} {2j-1} - \frac{\hat{U}%
^{(q)}_{i,j+1}} {2j+3} \quad j \geq 1, &  & 
\end{array}
\right.
\end{equation*}
from which it is possible to get explicit expressions for the Legendre
coefficients of higher derivatives. \newline
For the second derivative we have
\begin{equation*}
\hat{U}_{i,j}^{(2)}= \left\lbrace
\begin{array}{ccc}
\left(i+\displaystyle \frac{1}{2}\right)\displaystyle \sum_{\substack{p=i+2 \\ p+i \ \text{even}}}^{\infty} [p(p+1)-i(i+1)]\, \hat{U}_{p,j} , \quad i,j \geq 0, &  &  \\
\left(j+\displaystyle \frac{1}{2}\right)\displaystyle \sum_{\substack{p=j+2 \\ p+j \ \text{even}}}^{\infty} [p(p+1)-j(j+1)]\, \hat{U}_{i,p} , \quad i,j \geq 0. &  &
\end{array}
\right.
\end{equation*}
The previous expansions are not merely formal, provided $U$ is smooth enough.

The approximation of the derivative of a function expanded in this basis can therefore be obtained by a simple computation on its coefficients.
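
In coefficient space this is a short computation; the sketch below forms the coefficients of $\partial U/\partial x$ from those of $U$ using the first-derivative formula above (the layout of the coefficient matrix \texttt{Uhat}, with \texttt{Uhat(i+1,j+1)}$\,=\hat{U}_{i,j}$, is an assumed convention):
\begin{lstlisting}
% Legendre coefficients of dU/dx from those of U:
% hat{U}^(1)_{i,j} = (2i+1) * sum of hat{U}_{p,j}, p > i, p+i odd.
N = size(Uhat,1) - 1;
Dx = zeros(N+1);
for i = 0:N-1
    Dx(i+1,:) = (2*i+1) * sum(Uhat(i+2:2:N+1,:), 1);
end
% For d/dy, apply the same formula along the second index (columns).
\end{lstlisting}
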

\subsection{Numerical tests.\label{ZzZ}}
In order to give a first illustration of the performance of our implementation of bivariate Legendre approximation, we apply the method to some bivariate functions.


Table \ref{tab1} reports the results of applying the approximation algorithm at various orders on $[-1,1]^2$. The absolute and relative errors have been computed in a suitable discrete norm.
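
The sketch below shows how such errors can be produced; it continues the coefficient computation sketched in the previous section (the variables \texttt{U}, \texttt{Uhat} and \texttt{N} are assumed to be in the workspace, and the grid size is an arbitrary choice):
\begin{lstlisting}
% Discrete max-norm error of U_N on a uniform grid over [-1,1]^2.
t = linspace(-1,1,101).';                    % 1D evaluation grid
Pg = ones(numel(t),N+1); Pg(:,2) = t;
for k = 1:N-1
    Pg(:,k+2) = ((2*k+1)*t.*Pg(:,k+1) - k*Pg(:,k)) / (k+1);
end
UNg = Pg * Uhat * Pg.';                      % U_N(t_k,t_l) on the grid
[T1,T2] = ndgrid(t,t);
Ug = U(T1,T2);                               % exact values
absErr = max(abs(Ug(:) - UNg(:)))
relErr = absErr / max(abs(Ug(:)))
\end{lstlisting}
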
\renewcommand\arraystretch{1.1}
\begin{longtable}[c]{cccc}
\hline
\textbf{f(x,y)}&\textbf{N} & \bf Absolute error& \bf Relative error \\
\hline\hline
\multirow{5}{20 mm}{\\\bf \begin{center}
$\cos(x+y)$
\end{center} } 
& $2^0$ & 0.6387 & 0.4111 \\%\cline{2-4}
& $2^1$ &  0.3343 & 0.2152\\%\cline{2-4}
& $2^2$&  0.0359& 0.0231 \\%\cline{2-4} 
& $2^3$&  0 & 0  \\ \hline%\hline
\multirow{7}{20 mm}{\\\bf \begin{center}\vspace{-1.5cm}
$\exp(x+y)$
\end{center} } 
& $2^0$ & 1.8115 & 0.4995 \\%\cline{2-4}
& $2^1$ &  0.6158 & 0.1698 \\%\cline{2-4}
& $2^2$&  0.1022 & 0.0282 \\%\cline{2-4} 
& $2^3$& 0.0014  &3.8565 $\times 10^{-4}$\\ \hline
\caption{ Absolute and relative approximation error.}\label{tab1}
\end{longtable}





For the sake of comparison with another widely used bivariate method (the least squares method), we show in Table \ref{tab2} the results of an approximation in another basis, constructed as a tensor product of Chebyshev polynomials \cite{8}-\cite{3}.
















\renewcommand\arraystretch{1.1}
\begin{longtable}[c]{cccc}
\hline
\textbf{f(x,y)}&\textbf{N} & \bf Legendre Error& \bf Chebyshev Error \\
\hline
\multirow{3}{20 mm}{\\\bf \begin{center}
$x^2y+y^2x$\end{center} } & $2^0$ & 0.4869 & 0.5196 \\%\cline{2-4}
& $2^1$ & 0.3443&0.4732 \\%\cline{2-4}
& $2^2$& 0 & 0.4218 \\%\hline
\hline\multirow{4}{15 mm}{\\\bf \begin{center}
$y\cos(x)$
\end{center} }
 & $2^0$ & 0.9848 & 0.9958 \\%\cline{2-4}
& $2^1$ &  0.1602 & 0.3613 \\%\cline{2-4}
& $2^2$&  3.5$\times 10^{-3}$ & 0.5389         \\%\cline{2-4}
&$2^3$&3.0037$\times 10^{-5}$ & 1.4443 \\ \hline
\hline
\multirow{8}{20 mm}{\\\bf \begin{center}\vspace{-2cm}$\sin \pi x\sin \pi y $
\end{center} } 
& $2^0$ & 1 & 1\\%\cline{2-4}
& $2^1$ &  0.7940 & 0.8628 \\%\cline{2-4}
& $2^2$&  0.6304 & 0.6439\\%\cline{2-4} 
& $2^3$&  0.1322 & 0.6853  \\%\cline{2-4} 
& $2^4$&  3$\times 10^{-4}$& 0.8144 \\ \hline
\caption{Approximation errors on the square for some functions.}\label{tab2}
\end{longtable}

Practical implementation of the bivariate Legendre approximation method
follows strictly the construction of Subsection \ref{1RT}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



\section{Spectral method.}\label{sec}
Spectral methods are among the numerical methods commonly used for approximating solutions of boundary value problems. In this section we present a generalization of the Tau spectral method to dimension $2$.
\subsection{Theoretical background.}
Let the universe of our discussion be the Hilbert space $\mathcal{H}:= \mathcal{L}^2([-1,1]\times [-1,1],\cmath)$. We wish to obtain an easily implementable computer model capable of processing large volumes of data. We consider a general formulation of a PDE problem:
$$ 
\left\lbrace
\begin{array}{l l l}
\displaystyle  \frac{\partial U}{\partial t}(X,t)&=&\mathcal{M}(U(X,t)) +f(X,t)\quad\quad\quad\; X\in \Omega \quad \quad\quad t\geq 0,
\\
\mathcal{C}(U(X,t))&=&g(t)\qquad  X\in \partial \Omega \qquad t>0,
\\
U(X,0)&=&U_{0}(X) \qquad\    X\in \Omega. 
\end{array}
\right.
$$
\begin{itemize}\renewcommand {\labelitemi }{$-$}
\item $\Omega$ is a bounded domain of $\rmath^n\times \rmath^n $   with the border  $\partial\Omega$.
\item $X$ is a pair $(x,y)$, where $x \in \rmath^n$ and $y \in \rmath^n$. 
\item $U(X,t)$ is the unknown function in the Hilbert space $\mathcal{H}$, and $f$ is an element of $\mathcal{H}$.
\item $\mathcal{M}$, $\mathcal{C}$ are operators from $\mathcal{H}$ to $\mathcal{H}$, where $\mathcal{C}$ defines the boundary conditions.
\end{itemize}
If $\mathcal{M}$ contains partial derivatives of order $k$, then there are $k$ boundary conditions ($g$ has $k$ components). Let $(P_{i,j})_{i,j = 1,\ldots,\infty}$ be a two-dimensional orthogonal Legendre basis which does not satisfy the boundary conditions.\\ We seek an approximation
\begin{equation}
U_N (X,t)  = \sum_{i=1}^{N}\sum_{j=1}^{N-i} \hat{U}_{i,j} P_{i,j} \quad \textrm{with}\quad \hat{U}_{i,j} = \frac{(2i+1)(2j+1)}{4}\,\scal{U_N, P_{i,j}}
\end{equation}
in $S_{N}$
(where $S_N$ denotes the space spanned by the first $N$ two-dimensional Legendre polynomials)
such that
\begin{eqnarray*}
P_{N-k}^{\bot} R_{N}&=&0 \qquad N-k \quad\mbox{equations}\\
\mathcal{C}\, U_{N}&=& 0 \qquad k\quad \mbox{equations}
\end{eqnarray*}
\begin{itemize}\renewcommand {\labelitemi }{$-$}
\item $R_{N}$ is the residual of the spectral decomposition in the two-dimensional Legendre basis; it is given by
\begin{equation}
R_{N}=\frac{\partial U_{N}}{\partial t}-\mathcal{M}(U_{N})-f.
\end{equation}  
\item $P_{N-k}^{\bot}$ denotes the orthogonal projection of $\mathcal{H}$ onto $S_{N-k}$.
\item $S_{N-k}$ is the space spanned by the first $N-k$ two-dimensional Legendre polynomials.
\item $U_{N}$ is then determined by $N$ ordinary differential equations.
\end{itemize}
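
To make this structure concrete, here is a minimal steady one-dimensional model sketch (not the full two-dimensional scheme of this section): a Legendre--Tau discretization of $u''=f$ on $[-1,1]$ with $u(\pm 1)=0$, i.e. $N-1$ residual rows plus $k=2$ boundary rows for the $N+1$ unknown coefficients. The right-hand side $f(x)=6x$ is chosen so that the exact solution $u=x^3-x$ is recovered.
\begin{lstlisting}
% Legendre-Tau model sketch: u'' = f on [-1,1], u(-1) = u(1) = 0.
N = 16;
fhat = zeros(N+1,1); fhat(2) = 6;     % f(x) = 6x = 6*P_1(x)
D2 = zeros(N+1);                      % second-derivative operator:
for i = 0:N                           % (i+1/2)*sum [p(p+1)-i(i+1)]*a_p
    for p = i+2:2:N
        D2(i+1,p+1) = (i+1/2) * (p*(p+1) - i*(i+1));
    end
end
A = [D2(1:N-1,:);                     % N-1 residual (tau) rows
     ones(1,N+1);                     % u(1)  = sum a_i          = 0
     (-1).^(0:N)];                    % u(-1) = sum (-1)^i * a_i = 0
a = A \ [fhat(1:N-1); 0; 0];          % Legendre coefficients of u_N
% a(2) = -2/5, a(4) = 2/5: u_N = (2/5)(P_3 - P_1) = x^3 - x.
\end{lstlisting}
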

\subsection{Error Estimation and stability results.}\label{AAAA}
Numerical stability is a global property of numerical algorithms and a necessary quality for obtaining meaningful results. In this section, we present a theoretical result concerning the stability of the method described above.
 


Let $\mathcal{H}$ be a $\mathbb{C}$-Hilbert space and $\mathcal{S}$ the subspace of $\mathcal{H}$ spanned by the first $N-k$ two-dimensional Legendre polynomials, so that $\dim(\mathcal{S})= n = N-k$, where $k$ is the number of boundary conditions. Our problem is to find a function $U \in \mathcal{H}$ such that
\begin{equation}\label{pb1}
\left\{ 
\begin{array}{ccc} 
\displaystyle\frac{\partial U}{\partial t}(X,t)+ \mathcal{M}\left(U(X,t)\right)&=& f\\
\mathcal{C}\left(U(X,t)\right)&= & g \\
U(0)&=& U_0,
\end{array}
\right.
\end{equation}
where $f,g$ and $U_0$ are given elements of $\mathcal{H}$.
\\Let $f_{n}\in \mathcal{S}$ be an approximation of $f$, and $U^0_{n}$ an approximation of  $U_0$, then the problem $(\ref{pb1})$ can be rewritten as 
\begin{equation}\label{Pb2}
\displaystyle\frac{\partial} {\partial t} ( \mathcal{P} U_{n})+ \mathcal{M}\left(U_{n}\right)=f_{n},  
\end{equation}
where $\mathcal{P}$ represents the Tau projection operator on $\mathcal{S}$.
\\Assume that the inner product and norm on $\mathcal{S}$ satisfy
\begin{equation}\label{hyp1}
\forall s \in \mathcal{S}, \quad Re \scal{\mathcal{P}\, s, \mathcal{M}\, s} \geq \kappa \norme{s}^2
\end{equation}
and
\begin{equation}\label{hyp2}
Re \scal{f,g} \leq \norme{f} \cdot \norme{g}
\end{equation}
We define $\tilde{f}_{n}$ by  
$$\tilde{f}_{n} = \displaystyle\frac{\partial} {\partial t} ( \mathcal{P} \tilde{U}_{n})+ \mathcal{M}\left(\tilde{U}_{n}\right),$$
where $U_{n}$ is the solution of problem $(\ref{Pb2})$ and $\tilde{U}_{n}$ is an arbitrary element of $\mathcal{S}$.
\\Under assumptions $(\ref{hyp1})$ and $(\ref{hyp2})$, we can prove the following result:
\begin{equation}\label{1044}
\norme{\mathcal{M}(U_{n}-\tilde{U}_{n})(t)}^2 \leq \norme{\mathcal{M}(U_{n}-\tilde{U}_n)(0)}^2 + \frac{1}{\kappa} \int_{0}^{t} \norme{\mathcal{M}^* (f_{n}-\tilde{f}_{n})(r)}^2 \, dr,
\end{equation}
where $\mathcal{M}^*$ denotes the adjoint  of the operator $\mathcal{M}$.
\\This bound can be interpreted as a stability result, but it requires some regularity conditions.
\begin{proof} 
We put
\begin{equation}\label{R1}
\displaystyle\frac{\partial} {\partial t} (\mathcal{P} U_{n})+ \mathcal{M}\left(U_{n}\right)=f_{n} 
\quad \textrm{and}\quad 
\displaystyle\frac{\partial} {\partial t} (\mathcal{P}\tilde{U}_{n})+ \mathcal{M}\left(\tilde{U}_{n}\right)=\tilde{f}_{n} 
\end{equation}
then
$$\frac{\partial} {\partial t}\, \mathcal{P} (U_{n}-\tilde{U}_{n})+ \mathcal{M}\left(U_{n}-\tilde{U}_{n}\right) =f_{n}-\tilde{f}_{n}.$$
Taking the inner product of both sides with $\mathcal{M}\left(\displaystyle \frac{\partial}{\partial t}(U_{n}-\tilde{U}_{n})\right)$ in $\mathcal{S}$ gives
\begin{equation}\label{A}
\begin{array}{c c}
\scal{\mathcal{P}\left(\displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})\right), \mathcal{M}\left(     \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})\right)} + \scal{\mathcal{M}\left(U_{n} - \tilde{U}_{n} \right) ,  \mathcal{M}\left(     \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})\right) }
\\ = \scal{f_{n}- \tilde{f}_{n}, \mathcal{M}\left(     \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})\right)},
\end{array} 
\end{equation}
Taking the real part and applying $(\ref{hyp1})$ with $s=\displaystyle \frac{\partial}{\partial t} (U_{n}-\tilde{U}_n) \in \mathcal{S}$, we obtain
$$\kappa \norme{\displaystyle \frac{\partial}{\partial t}(U_n-\tilde{U}_n)}^2 \leq Re \scal{\mathcal{P} \left(\displaystyle \frac{\partial}{\partial t} (U_n-\tilde{U}_n)\right), \mathcal{M}\left(\displaystyle \frac{\partial}{\partial t}(U_n-\tilde{U}_n) \right)}.$$
On the other hand,
\begin{eqnarray*}
Re \scal{f_{n}- \tilde{f}_{n}, \mathcal{M}\left(     \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})\right)} &=& Re \scal{\mathcal{M}^*(f_{n}- \tilde{f}_{n}),      \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})} \\  
&\leq &
(\norme{\mathcal{M}^*(f_n-\tilde{f}_n)}) \cdot (\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}) \\ 
&\leq &
(\displaystyle \frac{1}{\sqrt{\kappa}} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)})\cdot (\sqrt{\kappa} \norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)})
\end{eqnarray*}
Since 
$$
\left(\displaystyle \frac{1}{\sqrt{\kappa}} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)} - \sqrt{\kappa} \norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}\right)^2 \geq 0
$$
then 
\begin{eqnarray*}
\displaystyle \frac{1}{2\kappa} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}^2 + \frac{\kappa}{2}  
\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}^2 &\geq& 
( \displaystyle \frac{1}{\sqrt{\kappa}} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}) \cdot (\sqrt{\kappa} \norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)})\\
&\geq& 
(\norme{\mathcal{M}^*(f_n-\tilde{f}_n)}) \cdot (\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)})
\end{eqnarray*}
then we have 
\begin{eqnarray*}
Re \scal{\mathcal{M}^*(f_{n}- \tilde{f}_{n}),      \displaystyle \frac{\partial}{\partial t}(U_{n}- \tilde{U}_{n})} &\leq& \displaystyle \frac{1}{2\kappa} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}^2 + \frac{\kappa}{2}  
\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}^2.
\end{eqnarray*}
The formula $(\ref{A})$ becomes
\begin{eqnarray*}
\kappa \norme{\displaystyle \frac{\partial}{\partial t} (U_n -\tilde{U}_n)}^2 + \frac{1}{2}\frac{\partial}{\partial t}
\norme{\mathcal{M}(U_n -\tilde{U}_n)}^2 
&\leq& \displaystyle \frac{1}{2\kappa} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}^2 + \frac{\kappa}{2}  
\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}^2 \\
&\leq& \displaystyle \frac{1}{2\kappa} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}^2 + \kappa 
\norme{\displaystyle \frac{\partial}{\partial t} (U_n - \tilde{U}_n)}^2. 
\end{eqnarray*}
hence
\begin{eqnarray*}
\frac{\partial}{\partial t}
\norme{\mathcal{M}(U_n -\tilde{U}_n)}^2 &\leq& \displaystyle \frac{1}{\kappa} \norme{\mathcal{M}^*(f_n-\tilde{f}_n)}^2.
\end{eqnarray*}
By integrating both sides of this inequality, we obtain
\begin{eqnarray*}
\int_{0}^t \frac{\partial}{\partial r}
\norme{\mathcal{M}(U_n -\tilde{U}_n)(r)}^2 dr  &\leq&  \displaystyle \frac{1}{\kappa} \int_{0}^t \norme{\mathcal{M}^*(f_n-\tilde{f}_n)(r)}^2 dr \\
\norme{\mathcal{M}(U_n -\tilde{U}_n)(t)}^2 &\leq& \norme{\mathcal{M}(U_n -\tilde{U}_n)(0)}^2 + \displaystyle \frac{1}{\kappa} \int_{0}^t \norme{\mathcal{M}^*(f_n-\tilde{f}_n)(r)}^2 dr. 
\end{eqnarray*}
\end{proof}








Now assume that $f$ and $U$ are regular functions, and let $\tilde{U}_n$ be an approximation of $U$ such that $\tilde{U}_n(0)=U_n^0$. Using the bound $(\ref{1044})$, we can obtain the following error estimate:
\begin{eqnarray*}
\norme{\mathcal{M}(U-U_n)(t)} &\leq& \displaystyle\max_{0\leq r\leq t}\norme{\mathcal{M}(U-\tilde{U}_n)(r)}+\left(\displaystyle\max_{0\leq r\leq t} \norme{\mathcal{M}^*(f-f_n)(r)}\right.\\&+ &\left.
\displaystyle\max_{0\leq r\leq t}\norme{\frac{\partial}{\partial t}(U-\tilde{U}_n)(r)}+
\displaystyle\max_{0\leq r\leq t}\norme{\mathcal{M}^*\mathcal{M}(U-\tilde{U}_n)(r)}\right).
\end{eqnarray*}



\begin{proof}
We have
\begin{equation*}
\norme{\mathcal{M} (U-U_n)(t)} \; \leq \;\norme{\mathcal{M}(U-\tilde{U}_n)(t)} +\norme{\mathcal{M}(U_n-\tilde{U}_n)(t)},
\end{equation*}
and we can estimate $\norme{\mathcal{M}(U_n-\tilde{U}_n)(t)}$ from $(\ref{1044})$; this requires bounding $\norme{\mathcal{M}^*(f_n-\tilde{f}_n)(t)}$:
\begin{eqnarray*}
\norme{\mathcal{M}^* (f_n-\tilde{f}_n)(t)} &\leq & \norme{\mathcal{M}^* (f-f_n)(t)} + \norme{\mathcal{M}^* (f-\tilde{f}_n)(t)} \\
&\leq &  \norme{\mathcal{M}^* (f-f_n)(t)} + \norme{\mathcal{M}^* \frac{\partial}{\partial t} (U-\tilde{U}_n)} + \norme{\mathcal{M}^* \mathcal{M}  (U-\tilde{U}_n)},
\end{eqnarray*}
then 
\begin{eqnarray*}
\norme{\mathcal{M}(U-U_n)(t)} &\leq& \displaystyle\max_{0\leq r\leq t}\norme{\mathcal{M}(U-\tilde{U}_n)(r)}+\left(\displaystyle\max_{0\leq r\leq t} \norme{\mathcal{M}^*(f-f_n)(r)}\right.\\&+ &\left.
\displaystyle\max_{0\leq r\leq t}\norme{\frac{\partial}{\partial t}(U-\tilde{U}_n)(r)}+
\displaystyle\max_{0\leq r\leq t}\norme{\mathcal{M}^*\mathcal{M}(U-\tilde{U}_n)(r)}\right).
\end{eqnarray*}
\end{proof}



The results of the examples presented in the tables, together with the estimates of Subsection \ref{AAAA}, show the efficiency and performance of the proposed approximation.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%\section{Competing Interests}
%The authors declare that they have no competing interests.
%
%
%\section{Authors contributions}
%The contribution was made by the three authors as equal : Ismahène Sehili, Abdelhamid Zerroug and Azedine Rahmoune.
%
\bibliographystyle{plain}
\bibliography{Bibliography}



\end{document}