lilit.likelihood

import pickle
import os

import matplotlib.pyplot as plt
import numpy as np
from cobaya.likelihood import Likelihood


class LiLit(Likelihood):

    """Class defining the Likelihood for LiteBIRD (LiLit).

    Within LiLit, the most relevant LiteBIRD study cases (T, E, B) are already tested and working. If you need to work with those, you should not have to look into the actual definition of the likelihood function and can promptly start running your MCMCs. You should nevertheless provide the likelihood with a file containing the appropriate LiteBIRD noise power spectra, since LiLit implements a simple inverse noise weighting merely as a placeholder for something more realistic. As regards lensing, LiLit needs you to pass the reconstruction noise, since its computation is not coded; thus there is no placeholder for lensing.

    Parameters:
        name (str):
            The name for the likelihood, used in the output. It is necessary to pass it to LiLit (default: None).
        fields (list):
            List of fields in the data file (default: None).
        lmax (int or list):
            Maximum multipole to consider (default: None).
        like (str, optional):
            Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
        lmin (int or list):
            Minimum multipole to consider (default: 2).
        cl_file (str, optional):
            Path to Cl file (default: None).
        nl_file (str, optional):
            Path to noise file (default: None).
        experiment (str, optional):
            Name of experiment (default: None).
        nside (int, optional):
            Nside of the map (default: None).
        r (float, optional):
            Tensor-to-scalar ratio (default: None).
        nt (float, optional):
            Tensor spectral tilt (default: None).
        pivot_t (float, optional):
            Pivot scale of the tensor primordial power spectrum (default: 0.01).
        fsky (float or list):
            Sky fraction (default: 1).
        sep (str, optional):
            Separator used in the data file (default: "").
        debug (bool, optional):
            If True, produces more verbose output (default: None).


    Attributes:
        fields (list):
            List of fields in the data file.
        n (int):
            Number of fields.
        keys (list):
            List of keywords for the dictionaries.
        gauss_keys (list):
            List of keywords for the Gaussian likelihood (4-points).
        sigma2 (np.ndarray):
            Array of covariances for the Gaussian likelihood case.
        lmax (int or list):
            List of lmax values.
        lmaxs (dict):
            Dictionary of lmax values.
        fsky (int or list):
            List of fsky values.
        fskies (dict):
            Dictionary of fsky values.
        lmin (int or list):
            Minimum multipole to consider.
        lmins (dict):
            Dictionary of lmin values.
        like (str):
            Type of likelihood to use.
        cl_file (str):
            Path to Cl file.
        fiduCLS (dict):
            Dictionary of fiducial Cls.
        noiseCLS (dict):
            Dictionary of noise Cls.
        fiduCOV (np.ndarray):
            Fiducial covariance matrix obtained from the corresponding dictionary.
        noiseCOV (np.ndarray):
            Noise covariance matrix obtained from the corresponding dictionary.
        data (np.ndarray):
            Data vector obtained by summing fiduCOV + noiseCOV.
        cobaCLs (dict):
            Dictionary of Cobaya Cls.
        cobaCOV (np.ndarray):
            Cobaya covariance matrix obtained from the corresponding dictionary.
        coba (np.ndarray):
            Cobaya vector obtained by summing cobaCOV + noiseCOV.
        nl_file (str):
            Path to noise file.
        experiment (str):
            Name of experiment.
        nside (int):
            Nside of the map.
        r (float):
            Tensor-to-scalar ratio.
        nt (float):
            Tensor spectral tilt.
        pivot_t (float):
            Pivot scale of the tensor primordial power spectrum.
        sep (str):
            Separator used in the data file.
        debug (bool):
            If True, produces more output.
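
    Example:
        A minimal usage sketch (the noise file path and the parameter block are
        illustrative assumptions, not shipped defaults):

            from cobaya.run import run

            from lilit.likelihood import LiLit

            # B-mode likelihood up to lmax=300, reading a custom noise file
            likBB = LiLit(
                name="BB",
                fields="b",
                lmax=300,
                r=0.01,
                nl_file="path/to/noise_bb.pkl",  # hypothetical path
                fsky=0.6,
            )
            info = {
                "likelihood": {"BB": likBB},
                "params": {},  # your sampled and fixed parameters go here
                "sampler": {"mcmc": None},
            }
            updated_info, sampler = run(info)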
    """

    def __init__(
        self,
        name=None,
        fields=None,
        lmax=None,
        like="exact",
        lmin=2,
        cl_file=None,
        nl_file=None,
        experiment=None,
        nside=None,
        r=None,
        nt=None,
        pivot_t=0.01,
        fsky=1,
        sep="",
        debug=None,
    ):
        # Check that the user has provided the name of the likelihood
        assert (
            name is not None
        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
        # Check that the user has provided the fields
        assert (
            fields is not None
        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
        # Check that the user has provided the maximum multipole
        assert lmax is not None, "You must provide the lmax (e.g. 300)"

        self.fields = fields
        self.n = len(fields)
        self.lmin = lmin
        self.like = like
        self.sep = sep
        self.cl_file = cl_file
        self.nl_file = nl_file
        self.experiment = experiment
        if self.experiment is not None:
            # Check that the user has provided the nside if an experiment is used
            assert nside is not None, "You must provide an nside to compute the noise"
            self.nside = nside
        self.debug = debug
        self.keys = self.get_keys()
        if "bb" in self.keys:
            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
            assert (
                r is not None
            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (the default pivot scale is 0.01 Mpc^-1)"
            self.r = r
            self.nt = nt
            self.pivot_t = pivot_t

        self.set_lmin_lmax_fsky(lmin, lmax, fsky)

        Likelihood.__init__(self, name=name)

    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
        """Take the lmin, lmax and fsky parameters and set the corresponding attributes.

        Sets the minimum multipole, the maximum multipole and the sky fraction. This automatically handles the case of a single value or a list of values. Note that the lmin, lmax and fsky of the cross-correlations are set to the geometric mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least as long as the masks of the two fields largely overlap.

        Parameters:
            lmin (int or list):
                Value or list of values of lmin.
            lmax (int or list):
                Value or list of values of lmax.
            fsky (float or list):
                Value or list of values of fsky.
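
        Example:
            A sketch of the geometric-mean rule for a hypothetical run with
            fields ['t', 'e'] (the numbers are illustrative, not defaults):

                self.set_lmin_lmax_fsky(2, [1500, 1200], [0.7, 0.6])
                # self.lmaxs['te'] == int(np.floor(np.sqrt(1500 * 1200))) == 1341
                # self.fskies['te'] == np.sqrt(0.7 * 0.6)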
        """

        self.lmins = {}
        self.lmaxs = {}
        self.fskies = {}

        # Set lmin
        if isinstance(lmin, list):
            assert (
                len(lmin) == self.n
            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.lmins[key] = int(
                        np.ceil(np.sqrt(lmin[i] * lmin[j]))
                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good.
                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
            self.lmin = min(lmin)
        else:
            self.lmin = lmin

        # Set lmax
        if isinstance(lmax, list):
            assert (
                len(lmax) == self.n
            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.lmaxs[key] = int(
                        np.floor(np.sqrt(lmax[i] * lmax[j]))
                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good.
                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
            self.lmax = max(lmax)
        else:
            self.lmax = lmax

        # Set fsky
        if isinstance(fsky, list):
            assert (
                len(fsky) == self.n
            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.fskies[key] = np.sqrt(
                        fsky[i] * fsky[j]
                    )  # this approximation for the cross-correlation is not correct in the case of two very different masks (verified with simulations)
                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
            self.fsky = None
        else:
            self.fsky = fsky
        return

    def cov_filling(self, cov_dict):
        """Fill the covariance matrix with the appropriate spectra.

        Computes the covariance matrix given a dictionary of spectra. Returns the covariance matrix of the considered fields, with shape (num_fields x num_fields x lmax). Note that if more than one lmax, or lmin, is specified, the matrices will contain null values, making them singular. This is handled in another method.

        Parameters:
            cov_dict (dict):
                The input dictionary of spectra.
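
        Example:
            A shape sketch for a hypothetical run with fields ['t', 'e'] and
            lmax=1500:

                cov = self.cov_filling(self.fiduCLS)
                # cov.shape == (2, 2, 1501); cov[0, 1] holds the 'te' spectrum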
        """
        # Initialize output array
        res = np.zeros((self.n, self.n, self.lmax + 1))

        # Loop over field1
        for i, field1 in enumerate(self.fields):
            # Loop over field2
            for j, field2 in enumerate(self.fields[i:]):
                # Get the index of field2
                j += i

                # Get the key of the covariance matrix
                key = field1 + self.sep + field2

                # Get lmin and lmax for this field pair
                lmin = self.lmins.get(key, self.lmin)
                lmax = self.lmaxs.get(key, self.lmax)

                # Get the covariance for this field pair
                cov = cov_dict.get(key, np.zeros(lmax + 1))

                # Set the appropriate values in the covariance matrix
                res[i, j, lmin : lmax + 1] = cov[lmin : lmax + 1]
                # Fill the covariance matrix symmetrically
                res[j, i] = res[i, j]

        return res

    def get_keys(self):
        """Extract the keys that have to be used as a function of the requested fields. These are the usual 2-point combinations, e.g., tt, te, ee, etc."""
        # List of all the possible combinations of the requested fields
        res = [
            self.fields[i] + self.sep + self.fields[j]
            for i in range(self.n)
            for j in range(i, self.n)
        ]
        # Print the requested keys
        if self.debug:
            print(f"\nThe requested keys are {res}")
        return res

    def get_Gauss_keys(self):
        """Find the proper dictionary keys for the requested fields.

        Extracts the keys that have to be used as a function of the requested fields for the Gaussian likelihood. The Gaussian likelihood is computed from 4-point quantities, so the keys are different, e.g., tttt, ttee, tete, etc.
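
        Example:
            For fields ['t', 'e'] the 2-point keys are ['tt', 'te', 'ee'], so
            the (0, 1) element pairs 'tt' with 'te' and is stored character by
            character:

                keys = self.get_Gauss_keys()
                # keys[0, 1] contains ['t', 't', 't', 'e']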
        """
        # Calculate the number of elements in the covariance matrix
        n = int(self.n * (self.n + 1) / 2)
        # Initialize a 3-d array to store the keys
        res = np.zeros((n, n, 4), dtype=str)
        # Loop over all the elements in the covariance matrix
        for i in range(n):
            for j in range(i, n):
                # Generate a key for the i-th and j-th element
                elem = self.keys[i] + self.sep + self.keys[j]
                # Loop over all the characters in the key
                for k in range(4):
                    # Add the k-th character to the i-th, j-th, and k-th
                    # indices of the array
                    res[i, j, k] = np.asarray(list(elem)[k])
                    res[j, i, k] = res[i, j, k]
        # Print the keys if the debug flag is set
        if self.debug:
            print(f"\nThe requested keys are {res}")
        # Return the keys
        return res

    def find_spectrum(self, input_dict, key):
        """Find a spectrum in a given dictionary.

        Returns the corresponding power spectrum for a given key. If the key is not found, it tries the reverse key. Otherwise it fills the array with zeros.

        Parameters:
            input_dict (dict):
                Dictionary where you want to search for keys.

            key (str):
                Key to search for.
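
        Example:
            Since cross-spectra are symmetric, a missing key falls back on its
            reverse (sketch using the fiducial dictionary):

                te = self.find_spectrum(self.fiduCLS, "te")
                # if "te" is absent, the "et" entry is used instead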
        """
        # create a zero array
        res = np.zeros(self.lmax + 1)

        # get lmin and lmax
        lmin = self.lmins.get(key, self.lmin)
        lmax = self.lmaxs.get(key, self.lmax)

        # try to find the key in the dictionary
        if key in input_dict:
            cov = input_dict[key]
        # if the key is not found, try the reverse key
        else:
            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))

        # fill the array with the requested spectrum
        res[lmin : lmax + 1] = cov[lmin : lmax + 1]

        return res

    def sigma(self, keys, fiduDICT, noiseDICT):
        """Define the covariance matrix for the Gaussian case.

        In the case of a Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate function.

        Parameters:
            keys (np.ndarray):
                Keys for the covariance elements, as returned by get_Gauss_keys.

            fiduDICT (dict):
                Dictionary with the fiducial spectra.

            noiseDICT (dict):
                Dictionary with the noise spectra.
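
        Note:
            Up to the fsky factors, each element implements the usual
            Knox-like covariance of two spectra:

                Cov(C_l^AB, C_l^CD) = (C_l^AC + N_l^AC) * (C_l^BD + N_l^BD)
                                    + (C_l^AD + N_l^AD) * (C_l^BC + N_l^BC)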
        """
        # The covariance matrix has to be symmetric.
        # The number of parameters in the likelihood is self.n.
        # We will store the covariance matrix in a (n x n x self.lmax+1) ndarray,
        # where n = int(self.n * (self.n + 1) / 2).
        n = int(self.n * (self.n + 1) / 2)
        res = np.zeros((n, n, self.lmax + 1))
        for i in range(n):  # Loop over all combinations of pairs of spectra
            for j in range(i, n):
                C_AC = self.find_spectrum(
                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
                )  # Find the fiducial spectra for each pair
                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
                N_AC = self.find_spectrum(
                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
                )  # Find the noise spectra for each pair
                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
                if self.fsky is not None:  # If self.fsky is defined, use the fsky value
                    res[i, j] = (
                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
                    ) / self.fsky
                else:  # Otherwise, use the fsky values from the input spectra
                    AC = keys[i, j, 0] + keys[i, j, 2]
                    BD = keys[i, j, 1] + keys[i, j, 3]
                    AD = keys[i, j, 0] + keys[i, j, 3]
                    BC = keys[i, j, 1] + keys[i, j, 2]
                    AB = keys[i, j, 0] + keys[i, j, 1]
                    CD = keys[i, j, 2] + keys[i, j, 3]
                    res[i, j] = (
                        np.sqrt(self.fskies[AC] * self.fskies[BD])
                        * (C_AC + N_AC)
                        * (C_BD + N_BD)
                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
                        * (C_AD + N_AD)
                        * (C_BC + N_BC)
                    ) / (self.fskies[AB] * self.fskies[CD])
                res[j, i] = res[i, j]
        return res

    def inv_sigma(self, sigma):
        """Invert the covariance matrix of the Gaussian case.

        Inverts the previously calculated sigma ndarray. Note that some elements may be null, so the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding rows and columns.

        Parameters:
            sigma (np.ndarray):
                (n x n x self.lmax+1) ndarray, with n = self.n * (self.n + 1) / 2, containing the previously computed sigma (not inverted).
        """
        # Initialize array to store the inverted covariance matrices
        res = np.zeros(self.lmax + 1, dtype=object)

        # Loop over multipoles
        for i in range(self.lmax + 1):
            # Check if matrix is singular
            COV = sigma[:, :, i]
            if np.linalg.det(COV) == 0:
                # Get indices of null diagonal elements
                idx = np.where(np.diag(COV) == 0)[0]
                # Remove corresponding rows and columns
                COV = np.delete(COV, idx, axis=0)
                COV = np.delete(COV, idx, axis=1)
            # Invert matrix
            res[i] = np.linalg.inv(COV)
        return res[2:]

    def get_reduced_data(self, mat):
        """Find the reduced data, eliminating the singularity of the matrix.

        Cuts the rows and columns corresponding to zero diagonal values. Indeed, in the case of different lmax, or lmin, for the fields, you will have singular matrices.

        Parameters:
            mat (np.ndarray):
                A ndarray containing the covariance matrices, some of which are singular.
        """
        # Select the indices corresponding to the zero diagonal
        idx = np.where(np.diag(mat) == 0)[0]
        # Delete the rows and columns from the matrix
        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)

    def CAMBres2dict(self, camb_results):
        """Take the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.

        Parameters:
            camb_results (CAMBdata):
                CAMB result product from the method get_cmb_power_spectra.
        """
        # Get the multipole values
        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
        # Mapping between the CAMB keys and the ones we want
        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
        # Initialize the output dictionary
        res = {"ell": ls}
        # Loop over the keys we want
        for key, i in mapping.items():
            # Save the results
            res[key] = camb_results["total"][:, i]
        # Check if we want the lensing potential
        if "pp" in self.keys:
            # Get the lensing potential
            cl_lens = camb_results.get("lens_potential")
            # Check if it exists
            if cl_lens is not None:
                # Save it
                res["pp"] = cl_lens[:, 0].copy()
                # Check if we want the cross terms
                if "pt" in self.keys and "pe" in self.keys:
                    # Loop over the cross terms
                    for i, cross in enumerate(["pt", "pe"]):
                        # Save the result
                        res[cross] = cl_lens[:, i + 1].copy()
                        # Save the symmetric term
                        res[cross[::-1]] = res[cross]
        return res

    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
        """Take a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Optionally, an ell factor can be applied to the Cls.

        Parameters:
            txt (np.ndarray):
                Array containing the spectra as columns (e.g., as loaded with np.loadtxt).
            mapping (dict):
                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
            apply_ellfactor (bool, optional):
                If True, multiplies each spectrum by ell * (ell + 1) / 2 / pi (default: None).
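
        Example:
            A sketch for a single-column B-mode noise file (the file name and
            column index are assumptions about your file layout):

                txt = np.loadtxt("noise_bb.txt")  # hypothetical file
                nls = self.txt2dict(txt, mapping={"bb": 0}, apply_ellfactor=True)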
        """
        # Define the ell values from the length of the txt file
        assert (
            mapping is not None
        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
        ls = np.arange(txt.shape[0], dtype=np.int64)
        res = {"ell": ls}
        # Loop over the mapping and extract the corresponding column from the txt file
        # and store it in the dictionary under the corresponding keyword
        for key, i in mapping.items():
            if apply_ellfactor:
                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
            else:
                res[key] = txt[:, i]
        return res

    def prod_fidu(self):
        """Produce the fiducial spectra or read the input ones.

        If the user has not provided a Cl file, this function produces the fiducial power spectra starting from the CAMB inifile for Planck 2018. The extra keywords defined here maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt are set to the requested values. Note that if you do not provide a tilt, it follows the standard single-field consistency relation. If instead you provide a custom file, that file is read and used.
        """
        # If a custom file is provided, use that
        if self.cl_file is not None:
            # If the file is a pickle file, load it
            if self.cl_file.endswith(".pkl"):
                with open(self.cl_file, "rb") as pickle_file:
                    res = pickle.load(pickle_file)
            # Otherwise, load it as a text file
            else:
                txt = np.loadtxt(self.cl_file)
                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
                res = self.txt2dict(txt, mapping)
            return res

        try:
            import camb
        except ImportError:
            print("CAMB does not seem to be installed. Check the requirements.")
            raise

        # Read the ini file containing the parameters for CAMB
        path = os.path.dirname(os.path.abspath(__file__))
        planck_path = os.path.join(path, "planck_2018.ini")
        pars = camb.read_ini(planck_path)

        if "bb" in self.keys:  # If we want to include the tensor modes
            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
            pars.InitPower.set_params(
                As=2.100549e-9,
                ns=0.9660499,
                r=self.r,
                nt=self.nt,
                pivot_tensor=self.pivot_t,
                pivot_scalar=0.05,
                parameterization=2,
            )
            pars.WantTensors = True
            pars.Accuracy.AccurateBB = True
        pars.DoLensing = True
        # pars.Accuracy.AccuracyBoost = 2  # This helps getting an extra squeeze on the agreement between Cobaya and fiducial spectra

        if self.debug:
            print(pars)

        results = camb.get_results(pars)
        res = results.get_cmb_power_spectra(
            CMB_unit="muK",
            lmax=self.lmax,
            raw_cl=False,
        )
        return self.CAMBres2dict(res)

    def prod_noise(self):
        """Produce the noise power spectra or read the input ones.

        If the user has not provided a noise file, this function produces the noise power spectra for a given experiment with inverse noise weighting of the white noise in each channel (TT, EE, BB). Note that you may want to have a look at this procedure, since it is merely a placeholder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide a proper custom file, that file is read and used.
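
        Note:
            The placeholder combines the white-noise levels of the channels by
            inverse noise weighting,

                N_ell^ch = depth_ch^2 * exp(ell * (ell + 1) * sigma_ch^2)
                N_ell = [ sum_ch 1 / N_ell^ch ]^(-1)

            with sigma_ch derived from the channel FWHM (polarization spectra
            carry an extra beam factor), and returns the result with the
            ell * (ell + 1) / (2 * pi) factor applied.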
        """
        # If the input noise file is a pickle file, load it
        if self.nl_file is not None:
            if self.nl_file.endswith(".pkl"):
                with open(self.nl_file, "rb") as pickle_file:
                    res = pickle.load(pickle_file)
            # If not, load the file as a text file
            else:
                _txt = np.loadtxt(self.nl_file)
                _mapping = {"bb": 0}
                # Convert the text file to a dictionary
                res = self.txt2dict(_txt, _mapping, apply_ellfactor=True)
            return res

        print(
            "***WARNING***: the inverse noise weighting performed here severely underestimates "
            "the actual noise level of LiteBIRD. You should provide an input "
            "noise power spectrum with a more realistic noise."
        )

        try:
            import yaml
            from yaml.loader import SafeLoader
            import healpy as hp
        except ImportError:
            print("yaml or healpy does not seem to be installed. Check the requirements.")
            raise

        assert (
            self.experiment is not None
        ), "You must specify the experiment you want to consider"
        print(f"\nComputing noise for {self.experiment}")

        path = os.path.dirname(os.path.abspath(__file__))
        experiments_path = os.path.join(path, "experiments.yaml")
        with open(experiments_path) as f:
            data = yaml.load(f, Loader=SafeLoader)

        # Get the instrument data from the saved data
        instrument = data[self.experiment]

        # Get the FWHM values from the instrument data
        fwhms = np.array(instrument["fwhm"])

        # Get the frequency values from the instrument data
        freqs = np.array(instrument["frequency"])

        # Get the depth values from the instrument data
        depth_p = np.array(instrument["depth_p"])
        depth_i = np.array(instrument["depth_i"])

        # Convert the depths to pixel values
        depth_p /= hp.nside2resol(self.nside, arcmin=True)
        depth_i /= hp.nside2resol(self.nside, arcmin=True)
        depth_p = depth_p * np.sqrt(
            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
        )
        depth_i = depth_i * np.sqrt(
            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
        )

        # Get the number of frequencies
        n_freq = len(freqs)

        # Define the ell values as a numpy array
        ell = np.arange(0, self.lmax + 1, 1)

        # Define the keys for the dictionary that will be returned
        keys = ["tt", "ee", "bb"]

        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
        sigma2 = sigma**2

        # Calculate the inverse Gaussian beam function
        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])

        # Calculate the polarization factor
        pol_factor = np.array(
            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
        )

        # Exponentiate the polarization factor
        pol_factor = np.exp(pol_factor)

        # Calculate the Gaussian beam function for each polarization
        G = []
        for i, arr in enumerate(pol_factor):
            G.append(g * arr[:, np.newaxis])
        g = np.array(G)

        # Initialize the dictionary that will be returned
        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}

        # Calculate the inverse noise weights per channel
        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)

        # Combine the channels and normalize the power spectra
        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi

        res["tt"][:2] = [0, 0]
        res["ee"][:2] = [0, 0]
        res["bb"][:2] = [0, 0]

        return res

    def initialize(self):
        """Initialize the fiducial spectra and the noise power spectra."""
        # Compute the fiducial and noise power spectra
        self.fiduCLS = self.prod_fidu()
        self.noiseCLS = self.prod_noise()

        # Compute the covariance matrices
        self.fiduCOV = self.cov_filling(self.fiduCLS)
        self.noiseCOV = self.cov_filling(self.noiseCLS)

        # Print some information for debugging
        if self.debug:
            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")

            field = self.keys[0]  # inspect one of the requested fields
            print("\nPrinting the first few values to check that it starts from 0...")
            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")

        # Compute the total covariance matrix
        self.data = (
            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the inverse of the covariance matrix
        if self.like == "gaussian":
            self.gauss_keys = self.get_Gauss_keys()
            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
            self.sigma2 = self.inv_sigma(sigma2)

    def get_requirements(self):
        """Define the requirements of the likelihood, specifying which quantities calculated by the theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
        requirements = {}
        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
        # If debug is set to True, the likelihood will print the list of items required by the likelihood
        if self.debug:
            requirements["CAMBdata"] = None
            print(
                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
            )
        return requirements

    def data_vector(self, cov):
        """Get the data vector from the covariance matrix.

        Extracts the data vector necessary for the Gaussian case. Note that this cuts the null values, since some may be null when the fields have different values for lmax.

        Parameters:
            cov (np.ndarray):
                A ndarray containing the covariance matrices, some of which may contain null values.
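
        Example:
            A sketch for two fields, where the upper triangle of a (2 x 2)
            covariance slice is flattened into at most 3 non-null entries:

                vec = self.data_vector(self.coba[:, :, 0])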
        """
        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]

    def chi_exact(self, i=0):
        """Computes the proper chi-square term for the exact likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
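
        Note:
            For multiple fields this evaluates the exact (Wishart-like)
            expression with M = C_theory^(-1) * C_data:

                chi2_ell = tr(M) - ln|M| - n_fields

            which reduces to M - ln(M) - 1 in the single-field case.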
        """
        # If the number of fields is not equal to 1, we are in the
        # multi-field case, so we work with the full covariance
        # matrices.
        if self.n != 1:
            # We extract the covariance matrix and data for the i-th
            # multipole.
            coba = self.coba[:, :, i]
            data = self.data[:, :, i]
            det = np.linalg.det(coba)
            # If the determinant is equal to 0, we need to reduce
            # the dimensionality of the data and covariance matrix.
            if det == 0:
                data = self.get_reduced_data(data)
                coba = self.get_reduced_data(coba)
            # We compute the matrix M from the covariance matrix and
            # the data.
            M = np.linalg.solve(coba, data)
            # We compute the chi-square term using the trace of M, the
            # log determinant of M, and the number of fields.
            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
        # If the number of fields is equal to 1, we are in the single
        # field case, so we work with scalars.
        else:
            # We compute M from the covariance value and the data.
            M = self.data / self.coba
            # We compute the chi-square term using M, the log of M, and
            # a constant value.
            return M - np.log(np.abs(M)) - 1

    def chi_gaussian(self, i=0):
        """Computes the proper chi-square term for the Gaussian likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # If we have more than one data vector
        if self.n != 1:
            coba = self.data_vector(self.coba[:, :, i])
            data = self.data_vector(self.data[:, :, i])
            return (coba - data) @ self.sigma2[i] @ (coba - data)
        # If we have only one data vector
        else:
            coba = self.coba[0, 0, :]
            data = self.data[0, 0, :]
            res = (coba - data) * self.sigma2 * (coba - data)
            return res

    def compute_chi_part(self, i=0):
        """Choose which chi-square term to compute.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # check if the likelihood is "exact"
        if self.like == "exact":
            # if so, compute the chi-square term for the exact likelihood
            return self.chi_exact(i)
        # if not, check if it is "gaussian"
        elif self.like == "gaussian":
            # if so, compute the chi-square term for the Gaussian likelihood
            return self.chi_gaussian(i)
        # if neither, print an error message
        else:
            print("You requested something different from 'exact' or 'gaussian'!")
            return

    def log_likelihood(self):
        """Compute the log likelihood, summing -0.5 * (2 * ell + 1) * chi2_ell over multipoles."""
        # Get the array of multipoles
        ell = np.arange(self.lmin, self.lmax + 1, 1)
        # Compute the log likelihood for each multipole
        if self.n != 1:
            logp_ℓ = np.zeros(ell.shape)
            for i in range(0, self.lmax + 1 - self.lmin):
                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
        else:
            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
        # Sum the log likelihood over multipoles
        return np.sum(logp_ℓ)

    def logp(self, **params_values):
        """Get the log likelihood and pass it to Cobaya to carry on the MCMC process."""
        if self.debug:
            CAMBdata = self.provider.get_CAMBdata()
            pars = CAMBdata.Params
            print(pars)

        # Get the Cls from Cobaya
        self.cobaCLs = self.provider.get_Cl(ell_factor=True)

        if self.debug:
            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")

            field = self.keys[0]  # inspect one of the requested fields
            print("\nPrinting the first few values to check that it starts from 0...")
            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")

        # Fill the covariance matrix with the Cls from Cobaya
        self.cobaCOV = self.cov_filling(self.cobaCLs)

        if self.debug:
            ell = np.arange(0, self.lmax + 1, 1)
            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
            plt.xlim(2, None)
            plt.legend()
            plt.show()

        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
        self.coba = (
            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the likelihood
        logp = self.log_likelihood()

        if self.debug:
            print(logp)
            exit()

        return logp


__all__ = ["LiLit"]

__docformat__ = "google"
__pdoc__ = {}
__pdoc__[
    "Likelihood"
] = "Likelihood class from Cobaya, refer to the Cobaya documentation for more information."
class LiLit(cobaya.likelihood.Likelihood):
 10class LiLit(Likelihood):
 11
 12    """Class defining the Likelihood for LiteBIRD (LiLit).
 13
 14    Within LiLit, the most relevant study cases of LiteBIRD (T, E, B) are already tested and working. So, if you need to work with those, you should not need to look into the actual definition of the likelihood function, since you can proptly start running your MCMCs. Despite this, you should provide to the likelihood some file where to find the proper LiteBIRD noise power spectra, given that LiLit is implementing a simple inverse noise weighting just as a place-holder for something more realistic. As regards lensing, LiLit will need you to pass the reconstruction noise, since its computation is not coded, thus there is no place-holder for lensing.
 15
 16    Parameters:
 17        name (str):
 18            The name for the likelihood, used in the output. It is necessary to pass it to LiLit. (default: None).
 19        fields (list):
 20            List of fields in the data file (default: None).
 21        lmax (int or list):
 22            Maximum multipole to consider (default: None).
 23        like (str, optional):
 24            Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
 25        lmin (int or list):
 26            Minimum multipole to consider (default: 2).
 27        cl_file (str, optional):
 28            Path to Cl file (default: None).
 29        nl_file (str, optional):
 30            Path to noise file (default: None).
 31        experiment (str, optional):
 32            Name of experiment (default: None).
 33        nside (int, optional):
 34            Nside of the map (default: None).
 35        r (float, optional):
 36            Tensor-to-scalar ratio (default: None).
 37        nt (float, optional):
 38            Tensor spectral tilt (default: None).
 39        pivot_t (float, optional):
 40            Pivot scale of the tensor primordial power spectrum (default: 0.01).
 41        fsky (float or list):
 42            Sky fraction (default: 1).
 43        sep (str, optional):
 44            Separator used in the data file (default: "").
 45        debug (bool, optional):
 46            If True, produces more verbose output (default: None).
 47
 48
 49    Attributes:
 50        fields (list):
 51            List of fields in the data file.
 52        n_fields (int):
 53            Number of fields.
 54        keys (list):
 55            List of keywords for the dictionaries.
 56        gauss_keys (list):
 57            List of keywords for the Gaussian likelihood (4-points).
 58        sigma2 (np.ndarray):
 59            Array of covariances for the Gaussian likelihood case.
 60        lmax (int or list):
 61            List of lmax values.
 62        lmaxes (dict):
 63            Dictionary of lmax values.
 64        fsky (int or list):
 65            List of fsky values.
 66        fskies (dict):
 67            Dictionary of fsky values.
 68        lmin (int or list):
 69            Minimum multipole to consider.
 70        lmins (dict):
 71            Dictionary of lmin values.
 72        like (str):
 73            Type of likelihood to use.
 74        cl_file (str):
 75            Path to Cl file.
 76        fiduCLS (dict):
 77            Dictionary of fiducial Cls.
 78        noiseCLS (dict):
 79            Dictionary of noise Cls.
 80        fiduCOV (np.ndarray):
 81            Fiducial covariance matrix obtained from the corresponding dictionary.
 82        noiseCOV (np.ndarray):
 83            Noise covariance matrix obtained from the corresponding dictionary.
 84        data (np.ndarray):
 85            Data vector obtained by summing fiduCOV + noiseCOV.
 86        cobaCLS (dict):
 87            Dictionary of Cobaya Cls.
 88        cobaCOV (np.ndarray):
 89            Cobaya covariance matrix obtained from the corresponding dictionary.
 90        coba (np.ndarray):
 91            Cobaya vector obtained by summing cobaCOV + noiseCOV.
 92        nl_file (str):
 93            Path to noise file.
 94        experiment (str):
 95            Name of experiment.
 96        nside (int):
 97            Nside of the map.
 98        r (float):
 99            Tensor-to-scalar ratio.
100        nt (float):
101            Tensor spectral tilt.
102        pivot_t (float):
103            Pivot scale of the tensor primordial power spectrum.
104        sep (str):
105            Separator used in the data file.
106        debug (bool):
107            If True, produces more output.
108    """
109
110    def __init__(
111        self,
112        name=None,
113        fields=None,
114        lmax=None,
115        like="exact",
116        lmin=2,
117        cl_file=None,
118        nl_file=None,
119        experiment=None,
120        nside=None,
121        r=None,
122        nt=None,
123        pivot_t=0.01,
124        fsky=1,
125        sep="",
126        debug=None,
127    ):
128        # Check that the user has provided the name of the likelihood
129        assert (
130            name is not None
131        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
132        # Check that the user has provided the fields
133        assert (
134            fields is not None
135        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
136        # Check that the user has provided the maximum multipole
137        assert lmax is not None, "You must provide the lmax (e.g. 300)"
138
139        self.fields = fields
140        self.n = len(fields)
141        self.lmin = lmin
142        self.like = like
143        self.sep = sep
144        self.cl_file = cl_file
145        self.nl_file = nl_file
146        self.experiment = experiment
147        if self.experiment is not None:
148            # Check that the user has provided the nside if an experiment is used
149            assert nside is not None, "You must provide an nside to compute the noise"
150            self.nside = nside
151        self.debug = debug
152        self.keys = self.get_keys()
153        if "bb" in self.keys:
154            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
155            assert (
156                r is not None
157            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (defaul is at 0.01 Mpc^-1)"
158            self.r = r
159            self.nt = nt
160            self.pivot_t = pivot_t
161
162        self.set_lmin_lmax_fsky(lmin, lmax, fsky)
163
164        Likelihood.__init__(self, name=name)
165
166    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
167        """Take lmin, lmax and fsky parameters and set the corresponding attributes.
168
169        Sets the minimum multipole, the maximum multipole and the sky fraction. This handles automatically the case of a single value or a list of values. Note that the lmin, lmax and fsky for the cross-correlations are set to the geometrical mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least assuming that the two masks of the two considered multipoles are very overlapped.
170
171        Parameters:
172            lmin (int or list):
173                Value or list of values of lmin.
174            lmax (int or list):
175                Value or list of values of lmax.
176            fsky (float or list):
177                Value or list of values of fsky.
178        """
179
180        self.lmins = {}
181        self.lmaxs = {}
182        self.fskies = {}
183
184        # Set lmin
185        if isinstance(lmin, list):
186            assert (
187                len(lmin) == self.n
188            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
189            for i in range(self.n):
190                for j in range(i, self.n):
191                    key = self.fields[i] + self.sep + self.fields[j]
192                    self.lmins[key] = int(
193                        np.ceil(np.sqrt(lmin[i] * lmin[j]))
194                    )  # this approximaiton allows to gain some extra multipoles in the cross-correalation for which the SNR is still good.
195                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
196            self.lmin = min(lmin)
197        else:
198            self.lmin = lmin
199
200        # Set lmax
201        if isinstance(lmax, list):
202            assert (
203                len(lmax) == self.n
204            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
205            for i in range(self.n):
206                for j in range(i, self.n):
207                    key = self.fields[i] + self.sep + self.fields[j]
208                    self.lmaxs[key] = int(
209                        np.floor(np.sqrt(lmax[i] * lmax[j]))
210                    )  # this approximaiton allows to gain some extra multipoles in the cross-correalation for which the SNR is still good.
211                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
212            self.lmax = max(lmax)
213        else:
214            self.lmax = lmax
215
216        # Set fsky
217        if isinstance(fsky, list):
218            assert (
219                len(fsky) == self.n
220            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
221            for i in range(self.n):
222                for j in range(i, self.n):
223                    key = self.fields[i] + self.sep + self.fields[j]
224                    self.fskies[key] = np.sqrt(
225                        fsky[i] * fsky[j]
226                    )  # this approximation for the cross-correlation is not correct in the case of two very different masks (verified with simulations)
227                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
228            self.fsky = None
229        else:
230            self.fsky = fsky
231        return
232
233    def cov_filling(self, cov_dict):
234        """Fill covariance matrix with appropriate spectra.
235
236        Computes the covariance matrix once given a dictionary. Returns the covariance matrix of the considered fields, in a shape equal to (num_fields x num_fields x lmax). Note that if more than one lmax, or lmin, is specified, there will be null values in the matrices, making them singular. This will be handled in another method.
237
238        Parameters:
239            cov_dict (dict):
240                The input dictionary of spectra.
241        """
242        # Initialize output array
243        res = np.zeros((self.n, self.n, self.lmax + 1))
244
245        # Loop over field1
246        for i, field1 in enumerate(self.fields):
247            # Loop over field2
248            for j, field2 in enumerate(self.fields[i:]):
249                # Get the index of field2
250                j += i
251
252                # Get the key of the covariance matrix
253                key = field1 + self.sep + field2
254
255                # Get lmin and lmax for this field pair
256                lmin = self.lmins.get(key, self.lmin)
257                lmax = self.lmaxs.get(key, self.lmax)
258
259                # Get the covariance for this field pair
260                cov = cov_dict.get(key, np.zeros(lmax + 1))
261
262                # Set the appropriate values in the covariance matrix
263                res[i, j, lmin : lmax + 1] = cov[lmin : lmax + 1]
264                # Fill the covariance matrix symmetrically
265                res[j, i] = res[i, j]
266
267        return res
268
269    def get_keys(self):
270        """Extracts the keys that has to be used as a function of the requested fields. These will be the usual 2-points, e.g., tt, te, ee, etc."""
271        # List of all the possible combinations of the requested fields
272        res = [
273            self.fields[i] + self.sep + self.fields[j]
274            for i in range(self.n)
275            for j in range(i, self.n)
276        ]
277        # Print the requested keys
278        if self.debug:
279            print(f"\nThe requested keys are {res}")
280        return res
281
282    def get_Gauss_keys(self):
283        """Find the proper dictionary keys for the requested fields.
284
285        Extracts the keys that has to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-points, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
286        """
287        # Calculate the number of elements in the covariance matrix
288        n = int(self.n * (self.n + 1) / 2)
289        # Initialize a 3-d array to store the keys
290        res = np.zeros((n, n, 4), dtype=str)
291        # Loop over all the elements in the covariance matrix
292        for i in range(n):
293            for j in range(i, n):
294                # Generate a key for the i-th and j-th element
295                elem = self.keys[i] + self.sep + self.keys[j]
296                # Loop over all the characters in the key
297                for k in range(4):
298                    # Add the k-th character to the i-th, j-th, and k-th
299                    # indices of the array
300                    res[i, j, k] = np.asarray(list(elem)[k])
301                    res[j, i, k] = res[i, j, k]
302        # Print the keys if the debug flag is set
303        if self.debug:
304            print(f"\nThe requested keys are {res}")
305        # Return the keys
306        return res
307
308    def find_spectrum(self, input_dict, key):
309        """Find a spectrum in a given dictionary.
310
311        Returns the corresponding power sepctrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.
312
313        Parameters:
314            input_dict (dict):
315                Dictionary where you want to search for keys.
316
317            key (str):
318                Key to search for.
319        """
320        # create a zero array
321        res = np.zeros(self.lmax + 1)
322
323        # get lmin and lmax
324        lmin = self.lmins.get(key, self.lmin)
325        lmax = self.lmaxs.get(key, self.lmax)
326
327        # try to find the key in the dictionary
328        if key in input_dict:
329            cov = input_dict[key]
330        # if the key is not found, try the reverse key
331        else:
332            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))
333
334        # fill the array with the requested spectrum
335        res[lmin : lmax + 1] = cov[lmin : lmax + 1]
336
337        return res
338
339    def sigma(self, keys, fiduDICT, noiseDICT):
340        """Define the covariance matrix for the Gaussian case.
341
342        In case of Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate funciton.
343
344        Parameters:
345            keys (dict):
346                Keys for the covariance elements.
347
348            fiduDICT (dict):
349                Dictionary with the fiducial spectra.
350
351            noiseDICT (dict):
352                Dictionary with the noise spectra.
353        """
354        # The covariance matrix has to be symmetric.
355        # The number of parameters in the likelihood is self.n.
356        # The covariance matrix is a (self.n x self.n x self.lmax+1) ndarray.
357        # We will store the covariance matrix in a (n x n x self.lmax+1) ndarray,
358        # where n = int(self.n * (self.n + 1) / 2).
359        n = int(self.n * (self.n + 1) / 2)
360        res = np.zeros((n, n, self.lmax + 1))
361        for i in range(n):  # Loop over all combinations of pairs of spectra
362            for j in range(i, n):
363                C_AC = self.find_spectrum(
364                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
365                )  # Find the fiducial spectra for each pair
366                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
367                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
368                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
369                N_AC = self.find_spectrum(
370                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
371                )  # Find the noise spectra for each pair
372                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
373                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
374                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
375                if self.fsky is not None:  # If self.fsky is defined, use the fsky value
376                    res[i, j] = (
377                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
378                    ) / self.fsky
379                else:  # Otherwise, use the fsky values from the input spectra
380                    AC = keys[i, j, 0] + keys[i, j, 2]
381                    BD = keys[i, j, 1] + keys[i, j, 3]
382                    AD = keys[i, j, 0] + keys[i, j, 3]
383                    BC = keys[i, j, 1] + keys[i, j, 2]
384                    AB = keys[i, j, 0] + keys[i, j, 1]
385                    CD = keys[i, j, 2] + keys[i, j, 3]
386                    res[i, j] = (
387                        np.sqrt(self.fskies[AC] * self.fskies[BD])
388                        * (C_AC + N_AC)
389                        * (C_BD + N_BD)
390                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
391                        * (C_AD + N_AD)
392                        * (C_BC + N_BC)
393                    ) / (self.fskies[AB] * self.fskies[CD])
394                res[j, i] = res[i, j]
395        return res
396
397    def inv_sigma(self, sigma):
398        """Invert the covariance matrix of the Gaussian case.
399
400        Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding row and column.
401
402        Parameters:
403            sigma (np.ndarray):
404                (self.n x self.n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
405        """
406        # Initialize array to store the inverted covariance matrices
407        res = np.zeros(self.lmax + 1, dtype=object)
408
409        # Loop over multipoles
410        for i in range(self.lmax + 1):
411            # Check if matrix is singular
412            COV = sigma[:, :, i]
413            if np.linalg.det(COV) == 0:
414                # Get indices of null diagonal elements
415                idx = np.where(np.diag(COV) == 0)[0]
416                # Remove corresponding rows and columns
417                COV = np.delete(COV, idx, axis=0)
418                COV = np.delete(COV, idx, axis=1)
419            # Invert matrix
420            res[i] = np.linalg.inv(COV)
421        return res[self.lmin :]  # align the indexing with the lmin used to slice the data
422
423    def get_reduced_data(self, mat):
424        """Find the reduced data eliminating the singularity of the matrix.
425
426        Cuts the row and column corresponding to a zero diagonal value. Indeed, in case of different lmax, or lmin, values for the fields, you will have singular matrices.
427
428        Parameters:
429            mat (np.ndarray):
430                A covariance matrix with possibly null diagonal entries.
431        """
432        # Select the indices corresponding to the zero diagonal
433        idx = np.where(np.diag(mat) == 0)[0]
434        # Delete the rows and columns from the matrix
435        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)
436
437    def CAMBres2dict(self, camb_results):
438        """Takes the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.
439
440        Parameters:
441            camb_results (CAMBdata):
442                CAMB result product from the method get_cmb_power_spectra.
443        """
444        # Build the multipole array from the shape of the CAMB output
445        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
446        # Mapping between the CAMB keys and the ones we want
447        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
448        # Initialize the output dictionary
449        res = {"ell": ls}
450        # Loop over the keys we want
451        for key, i in mapping.items():
452            # Save the results
453            res[key] = camb_results["total"][:, i]
454        # Check if we want the lensing potential
455        if "pp" in self.keys:
456            # Get the lensing potential
457            cl_lens = camb_results.get("lens_potential")
458            # Check if it exists
459            if cl_lens is not None:
460                # Save it
461                res["pp"] = cl_lens[:, 0].copy()
462                # Check if we want the cross terms
463                if "pt" in self.keys and "pe" in self.keys:
464                    # Loop over the cross terms
465                    for i, cross in enumerate(["pt", "pe"]):
466                        # Save the result
467                        res[cross] = cl_lens[:, i + 1].copy()
468                        # Save the symmetric term
469                        res[cross[::-1]] = res[cross]
470        return res
471
472    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
473        """Takes a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.
474
475        Parameters:
476            txt (np.ndarray):
477                Array containing the spectra as columns.
478            mapping (dict):
479                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
480        """
481        # Check that a mapping is provided, then define the ell values from the length of the input array
482        assert (
483            mapping is not None
484        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
485        ls = np.arange(txt.shape[0], dtype=np.int64)
486        res = {"ell": ls}
487        # Loop over the mapping and extract the corresponding column from the txt file
488        # and store it in the dictionary under the corresponding keyword
489        for key, i in mapping.items():
490            if apply_ellfactor:
491                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
492            else:
493                res[key] = txt[:, i]
494        return res
495
496    def prod_fidu(self):
497        """Produce fiducial spectra or read the input ones.
498
499        If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck2018. The extra keywords defined will maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, this method reads and stores that.
500        """
501        # If a custom file is provided, use that
502        if self.cl_file is not None:
503            # If the file is a pickle file, load it
504            if self.cl_file.endswith(".pkl"):
505                with open(self.cl_file, "rb") as pickle_file:
506                    res = pickle.load(pickle_file)
507            # Otherwise, load it as text file
508            else:
509                txt = np.loadtxt(self.cl_file)
510                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
511                res = self.txt2dict(txt, mapping)
512            return res
513
514        try:
515            import camb
516        except ImportError:
517            print("CAMB does not seem to be installed. Check the requirements.")
518            raise  # re-raise, since the fiducial production below needs CAMB
519        # Read the ini file containing the parameters for CAMB
520        path = os.path.dirname(os.path.abspath(__file__))
521        planck_path = os.path.join(path, "planck_2018.ini")
522        pars = camb.read_ini(planck_path)
523
524        if "bb" in self.keys:  # If we want to include the tensor mode
525            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
526            pars.InitPower.set_params(
527                As=2.100549e-9,
528                ns=0.9660499,
529                r=self.r,
530                nt=self.nt,
531                pivot_tensor=self.pivot_t,
532                pivot_scalar=0.05,
533                parameterization=2,
534            )
535            pars.WantTensors = True
536            pars.Accuracy.AccurateBB = True
537        pars.DoLensing = True
538        # pars.Accuracy.AccuracyBoost = 2  # This helps to further improve the agreement between the Cobaya and fiducial spectra
539
540        if self.debug:
541            print(pars)
542
543        results = camb.get_results(pars)
544        res = results.get_cmb_power_spectra(
545            CMB_unit="muK",
546            lmax=self.lmax,
547            raw_cl=False,
548        )
549        return self.CAMBres2dict(res)
550
551    def prod_noise(self):
552        """Produce noise power spectra or read the input ones.
553
554        If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse noise weighting of white noise in each channel (TT, EE, BB). Note that you may want to have a look at the procedure, since it is merely a placeholder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide the proper custom file, this method reads and stores that.
555        """
556        # If the input noise file is a pickle file, load it.
557        if self.nl_file is not None:
558            if self.nl_file.endswith(".pkl"):
559                with open(self.nl_file, "rb") as pickle_file:
560                    res = pickle.load(pickle_file)
561            # If not, load the file as a text file
562            else:
563                _txt = np.loadtxt(self.nl_file)
564                _mapping = {"bb": 0}
565                # Convert the text file to a dictionary
566                res = self.txt2dict(_txt, _mapping, apply_ellfactor=True)
567            return res
568
569        print(
570            "***WARNING***: the inverse noise weighting performed here severely underestimates "
571            "the actual noise level of LiteBIRD. You should provide an input "
572            "noise power spectrum with a more realistic noise."
573        )
574
575        try:
576            import yaml
577            from yaml.loader import SafeLoader
578            import healpy as hp
579        except ImportError:
580            print("PyYAML or healpy does not seem to be installed. Check the requirements.")
581            raise  # re-raise, since both packages are needed below
582        assert (
583            self.experiment is not None
584        ), "You must specify the experiment you want to consider"
585        print(f"\nComputing noise for {self.experiment}")
586
587        path = os.path.dirname(os.path.abspath(__file__))
588        experiments_path = os.path.join(path, "experiments.yaml")
589        with open(experiments_path) as f:
590            data = yaml.load(f, Loader=SafeLoader)
591
592        # Get the instrument data from the saved data
593        instrument = data[self.experiment]
594
595        # Get the FWHM values from the instrument data
596        fwhms = np.array(instrument["fwhm"])
597
598        # Get the frequency values from the instrument data
599        freqs = np.array(instrument["frequency"])
600
601        # Get the depth values from the instrument data
602        depth_p = np.array(instrument["depth_p"])
603        depth_i = np.array(instrument["depth_i"])
604
605        # Convert the depths from muK*arcmin to muK*radian
606        depth_p /= hp.nside2resol(self.nside, arcmin=True)
607        depth_i /= hp.nside2resol(self.nside, arcmin=True)
608        depth_p = depth_p * np.sqrt(
609            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
610        )
611        depth_i = depth_i * np.sqrt(
612            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
613        )
614
615        # Get the number of frequencies
616        n_freq = len(freqs)
617
618        # Define the ell values as a numpy array
619        ell = np.arange(0, self.lmax + 1, 1)
620
621        # Define the keys for the dictionary that will be returned
622        keys = ["tt", "ee", "bb"]
623
624        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
625        sigma2 = sigma**2
626
627        # Calculate the inverse of the squared Gaussian beam window (exp of +ell*(ell+1)*sigma^2)
628        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])
629
630        # Calculate the polarization factor
631        pol_factor = np.array(
632            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
633        )
634
635        # Exponentiate the (ell-independent) polarization factors
636        pol_factor = np.exp(pol_factor)
637
638        # Calculate the Gaussian beam function for each polarization
639        G = []
640        for i, arr in enumerate(pol_factor):
641            G.append(g * arr[:, np.newaxis])
642        g = np.array(G)
643
644        # Initialize the dictionary that will be returned
645        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}
646
647        # Per-channel inverse noise weights (beam-deconvolved)
648        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
649        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
650        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
651
652        # Combine the channels by inverse noise weighting and apply the ell factor
653        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
654        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
655        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi
656
657        res["tt"][:2] = [0, 0]
658        res["ee"][:2] = [0, 0]
659        res["bb"][:2] = [0, 0]
660
661        return res
662
663    def initialize(self):
664        """Initializes the fiducial spectra and the noise power spectra."""
665        # Compute the fiducial and noise power spectra
666        self.fiduCLS = self.prod_fidu()
667        self.noiseCLS = self.prod_noise()
668
669        # Compute the covariance matrices
670        self.fiduCOV = self.cov_filling(self.fiduCLS)
671        self.noiseCOV = self.cov_filling(self.noiseCLS)
672
673        # Print some information for debugging
674        if self.debug:
675            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
676            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")
677
678            field = self.keys[0]  # inspect the first requested spectrum
679            print("\nPrinting the first few values to check that it starts from 0...")
680            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
681            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")
682
683        # Compute the total covariance matrix
684        self.data = (
685            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
686            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
687        )
688
689        # Compute the inverse of the covariance matrix
690        if self.like == "gaussian":
691            self.gauss_keys = self.get_Gauss_keys()
692            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
693            self.sigma2 = self.inv_sigma(sigma2)
694
695    def get_requirements(self):
696        """Defines requirements of the likelihood, specifying quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
697        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
698        requirements = {}
699        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
700        # If debug is set to True, the likelihood will print the list of items required by the likelihood
701        if self.debug:
702            requirements["CAMBdata"] = None
703            print(
704                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
705            )
706        return requirements
707
708    def data_vector(self, cov):
709        """Get data vector from the covariance matrix.
710
711        Extracts the data vector necessary for the Gaussian case. Note that this cuts out the null values, since some entries may be null when the fields have different values of lmax or lmin.
712
713        Parameters:
714            cov (np.ndarray):
715                A ndarray containing the covariance matrices, with some null ones.
716        """
717        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]
718
719    def chi_exact(self, i=0):
720        """Computes proper chi-square term for the exact likelihood case.
721
722        Parameters:
723            i (int, optional):
724                ell index if needed. Defaults to 0.
725        """
726        # If the number of fields is not equal to 1, then we have a
727        # multi-field case, in which case we work with the full
728        # covariance matrix at each multipole.
729        if self.n != 1:
730            # We extract the covariance matrix and data at the i-th
731            # multipole.
732            coba = self.coba[:, :, i]
733            data = self.data[:, :, i]
734            det = np.linalg.det(coba)
735            # If the determinant is equal to 0, then we need to reduce
736            # the dimensionality of the data and covariance matrix.
737            if det == 0:
738                data = self.get_reduced_data(data)
739                coba = self.get_reduced_data(coba)
740            # We compute the matrix M using the covariance matrix and
741            # the data.
742            M = np.linalg.solve(coba, data)
743            # We compute the chi-square term using the trace of M, the
744            # log determinant of M, and the number of fields.
745            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
746        # If there is a single field, the covariance reduces to a
747        # scalar, so we can work on all the multipoles at once
748        # without looping over them.
749        else:
750            # We compute the matrix M using the covariance matrix and
751            # the data.
752            M = self.data / self.coba
753            # We compute the chi-square term using M, the log of M, and
754            # a constant value.
755            return M - np.log(np.abs(M)) - 1
756
757    def chi_gaussian(self, i=0):
758        """Computes proper chi-square term for the Gaussian likelihood case.
759
760        Parameters:
761            i (int, optional):
762                ell index if needed. Defaults to 0.
763        """
764        # If we have more than one data vector
765        if self.n != 1:
766            coba = self.data_vector(self.coba[:, :, i])
767            data = self.data_vector(self.data[:, :, i])
768            return (coba - data) @ self.sigma2[i] @ (coba - data)
769        # If we have only one data vector
770        else:
771            coba = self.coba[0, 0, :]
772            data = self.data[0, 0, :]
773            res = (coba - data) * self.sigma2 * (coba - data)
774            return res
775
776    def compute_chi_part(self, i=0):
777        """Chooses which chi-square term to compute.
778
779        Parameters:
780            i (int, optional):
781                ell index if needed. Defaults to 0.
782        """
783        # check if the likelihood is "exact"
784        if self.like == "exact":
785            # if so, compute the chi-square term for the exact likelihood
786            return self.chi_exact(i)
787        # if not, check if it is "gaussian"
788        elif self.like == "gaussian":
789            # if so, compute the chi-square term for the gaussian likelihood
790            return self.chi_gaussian(i)
791        # if neither, print an error message
792        else:
793            print("You requested something different from 'exact' or 'gaussian'!")
794            return
795
796    def log_likelihood(self):
797        """Computes the log likelihood."""
798        # Get the array of multipoles
799        ell = np.arange(self.lmin, self.lmax + 1, 1)
800        # Compute the log likelihood for each multipole
801        if self.n != 1:
802            logp_ℓ = np.zeros(ell.shape)
803            for i in range(0, self.lmax + 1 - self.lmin):
804                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
805        else:
806            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
807        # Sum the log likelihood over multipoles
808        return np.sum(logp_ℓ)
809
810    def logp(self, **params_values):
811        """Gets the log likelihood and pass it to Cobaya to carry on the MCMC process."""
812        if self.debug:
813            CAMBdata = self.provider.get_CAMBdata()
814            pars = CAMBdata.Params
815            print(pars)
816
817        # Get the Cls from Cobaya
818        self.cobaCLs = self.provider.get_Cl(ell_factor=True)
819
820        if self.debug:
821            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")
822
823            field = self.keys[0]  # inspect the first requested spectrum
824            print("\nPrinting the first few values to check that it starts from 0...")
825            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")
826
827        # Fill the covariance matrix with the Cls from Cobaya
828        self.cobaCOV = self.cov_filling(self.cobaCLs)
829
830        if self.debug:
831            ell = np.arange(0, self.lmax + 1, 1)
832            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
833            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
834            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
835            plt.xlim(2, None)
836            plt.legend()
837            plt.show()
838
839        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
840        self.coba = (
841            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
842            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
843        )
844
845        # Compute the likelihood
846        logp = self.log_likelihood()
847
848        if self.debug:
849            print(logp)
850            exit()  # stop after a single evaluation when debugging
851
852        return logp

Class defining the Likelihood for LiteBIRD (LiLit).

Within LiLit, the most relevant study cases of LiteBIRD (T, E, B) are already tested and working. So, if you need to work with those, you should not need to look into the actual definition of the likelihood function, since you can promptly start running your MCMCs. Despite this, you should provide the likelihood with a file containing the proper LiteBIRD noise power spectra, given that LiLit implements a simple inverse noise weighting just as a placeholder for something more realistic. As regards lensing, LiLit needs you to pass the reconstruction noise, since its computation is not coded; thus, there is no placeholder for lensing.

Arguments:
  • name (str): The name for the likelihood, used in the output. It is necessary to pass it to LiLit. (default: None).
  • fields (list): List of fields in the data file (default: None).
  • lmax (int or list): Maximum multipole to consider (default: None).
  • like (str, optional): Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
  • lmin (int or list): Minimum multipole to consider (default: 2).
  • cl_file (str, optional): Path to Cl file (default: None).
  • nl_file (str, optional): Path to noise file (default: None).
  • experiment (str, optional): Name of experiment (default: None).
  • nside (int, optional): Nside of the map (default: None).
  • r (float, optional): Tensor-to-scalar ratio (default: None).
  • nt (float, optional): Tensor spectral tilt (default: None).
  • pivot_t (float, optional): Pivot scale of the tensor primordial power spectrum (default: 0.01).
  • fsky (float or list): Sky fraction (default: 1).
  • sep (str, optional): Separator used in the data file (default: "").
  • debug (bool, optional): If True, produces more verbose output (default: None).
Attributes:
  • fields (list): List of fields in the data file.
  • n_fields (int): Number of fields.
  • keys (list): List of keywords for the dictionaries.
  • gauss_keys (list): List of keywords for the Gaussian likelihood (4-points).
  • sigma2 (np.ndarray): Array of covariances for the Gaussian likelihood case.
  • lmax (int or list): List of lmax values.
  • lmaxs (dict): Dictionary of lmax values.
  • fsky (int or list): List of fsky values.
  • fskies (dict): Dictionary of fsky values.
  • lmin (int or list): Minimum multipole to consider.
  • lmins (dict): Dictionary of lmin values.
  • like (str): Type of likelihood to use.
  • cl_file (str): Path to Cl file.
  • fiduCLS (dict): Dictionary of fiducial Cls.
  • noiseCLS (dict): Dictionary of noise Cls.
  • fiduCOV (np.ndarray): Fiducial covariance matrix obtained from the corresponding dictionary.
  • noiseCOV (np.ndarray): Noise covariance matrix obtained from the corresponding dictionary.
  • data (np.ndarray): Data vector obtained by summing fiduCOV + noiseCOV.
  • cobaCLs (dict): Dictionary of Cobaya Cls.
  • cobaCOV (np.ndarray): Cobaya covariance matrix obtained from the corresponding dictionary.
  • coba (np.ndarray): Cobaya vector obtained by summing cobaCOV + noiseCOV.
  • nl_file (str): Path to noise file.
  • experiment (str): Name of experiment.
  • nside (int): Nside of the map.
  • r (float): Tensor-to-scalar ratio.
  • nt (float): Tensor spectral tilt.
  • pivot_t (float): Pivot scale of the tensor primordial power spectrum.
  • sep (str): Separator used in the data file.
  • debug (bool): If True, produces more output.
LiLit(name=None, fields=None, lmax=None, like='exact', lmin=2, cl_file=None, nl_file=None, experiment=None, nside=None, r=None, nt=None, pivot_t=0.01, fsky=1, sep='', debug=None)
110    def __init__(
111        self,
112        name=None,
113        fields=None,
114        lmax=None,
115        like="exact",
116        lmin=2,
117        cl_file=None,
118        nl_file=None,
119        experiment=None,
120        nside=None,
121        r=None,
122        nt=None,
123        pivot_t=0.01,
124        fsky=1,
125        sep="",
126        debug=None,
127    ):
128        # Check that the user has provided the name of the likelihood
129        assert (
130            name is not None
131        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
132        # Check that the user has provided the fields
133        assert (
134            fields is not None
135        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
136        # Check that the user has provided the maximum multipole
137        assert lmax is not None, "You must provide the lmax (e.g. 300)"
138
139        self.fields = fields
140        self.n = len(fields)
141        self.lmin = lmin
142        self.like = like
143        self.sep = sep
144        self.cl_file = cl_file
145        self.nl_file = nl_file
146        self.experiment = experiment
147        if self.experiment is not None:
148            # Check that the user has provided the nside if an experiment is used
149            assert nside is not None, "You must provide an nside to compute the noise"
150            self.nside = nside
151        self.debug = debug
152        self.keys = self.get_keys()
153        if "bb" in self.keys:
154            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
155            assert (
156                r is not None
157            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (the default tensor pivot scale is 0.01 Mpc^-1)"
158            self.r = r
159            self.nt = nt
160            self.pivot_t = pivot_t
161
162        self.set_lmin_lmax_fsky(lmin, lmax, fsky)
163
164        Likelihood.__init__(self, name=name)
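
For orientation, here is a minimal usage sketch (the import path and all parameter values are illustrative; adapt them to your setup). It instantiates an exact B-mode likelihood and passes it to a Cobaya info dictionary:

    # Minimal sketch, assuming the package exposes LiLit at the top level.
    from lilit import LiLit

    # Exact likelihood on B modes up to lmax=300; r is required for the
    # fiducial production, and nside is required to compute the noise.
    exactBB = LiLit(name="BB", fields=["b"], lmax=300, r=0.01,
                    experiment="LiteBIRD", nside=128)

    info = {
        "likelihood": {"BB": exactBB},
        # "theory", "params" and "sampler" blocks go here as in any Cobaya run
    }
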
def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
166    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
167        """Take lmin, lmax and fsky parameters and set the corresponding attributes.
168
169        Sets the minimum multipole, the maximum multipole and the sky fraction. This automatically handles the case of a single value or a list of values. Note that the lmin, lmax and fsky for the cross-correlations are set to the geometric mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least assuming that the masks of the two considered fields largely overlap.
170
171        Parameters:
172            lmin (int or list):
173                Value or list of values of lmin.
174            lmax (int or list):
175                Value or list of values of lmax.
176            fsky (float or list):
177                Value or list of values of fsky.
178        """
179
180        self.lmins = {}
181        self.lmaxs = {}
182        self.fskies = {}
183
184        # Set lmin
185        if isinstance(lmin, list):
186            assert (
187                len(lmin) == self.n
188            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
189            for i in range(self.n):
190                for j in range(i, self.n):
191                    key = self.fields[i] + self.sep + self.fields[j]
192                    self.lmins[key] = int(
193                        np.ceil(np.sqrt(lmin[i] * lmin[j]))
194                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good
195                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
196            self.lmin = min(lmin)
197        else:
198            self.lmin = lmin
199
200        # Set lmax
201        if isinstance(lmax, list):
202            assert (
203                len(lmax) == self.n
204            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
205            for i in range(self.n):
206                for j in range(i, self.n):
207                    key = self.fields[i] + self.sep + self.fields[j]
208                    self.lmaxs[key] = int(
209                        np.floor(np.sqrt(lmax[i] * lmax[j]))
210                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good
211                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
212            self.lmax = max(lmax)
213        else:
214            self.lmax = lmax
215
216        # Set fsky
217        if isinstance(fsky, list):
218            assert (
219                len(fsky) == self.n
220            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
221            for i in range(self.n):
222                for j in range(i, self.n):
223                    key = self.fields[i] + self.sep + self.fields[j]
224                    self.fskies[key] = np.sqrt(
225                        fsky[i] * fsky[j]
226                    )  # this approximation for the cross-correlation is not correct in the case of two very different masks (verified with simulations)
227                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
228            self.fsky = None
229        else:
230            self.fsky = fsky
231        return

Take lmin, lmax and fsky parameters and set the corresponding attributes.

Sets the minimum multipole, the maximum multipole and the sky fraction. This automatically handles the case of a single value or a list of values. Note that the lmin, lmax and fsky for the cross-correlations are set to the geometric mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least assuming that the masks of the two considered fields largely overlap.

Arguments:
  • lmin (int or list): Value or list of values of lmin.
  • lmax (int or list): Value or list of values of lmax.
  • fsky (float or list): Value or list of values of fsky.
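
As a concrete illustration of the geometric-mean rule (values are hypothetical), for two fields with different cuts the cross-correlation entries are set as follows:

    import numpy as np

    # Hypothetical per-field values for fields ["t", "e"]
    lmax_t, lmax_e = 1500, 1200
    fsky_t, fsky_e = 0.8, 0.6

    lmax_te = int(np.floor(np.sqrt(lmax_t * lmax_e)))  # -> 1341
    fsky_te = np.sqrt(fsky_t * fsky_e)                 # -> ~0.693
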
def cov_filling(self, cov_dict):
233    def cov_filling(self, cov_dict):
234        """Fill covariance matrix with appropriate spectra.
235
236        Computes the covariance matrix given a dictionary of spectra. Returns the covariance matrix of the considered fields, with a shape equal to (num_fields x num_fields x lmax+1). Note that if more than one lmax, or lmin, is specified, there will be null values in the matrices, making them singular. This will be handled in another method.
237
238        Parameters:
239            cov_dict (dict):
240                The input dictionary of spectra.
241        """
242        # Initialize output array
243        res = np.zeros((self.n, self.n, self.lmax + 1))
244
245        # Loop over field1
246        for i, field1 in enumerate(self.fields):
247            # Loop over field2
248            for j, field2 in enumerate(self.fields[i:]):
249                # Get the index of field2
250                j += i
251
252                # Get the key of the covariance matrix
253                key = field1 + self.sep + field2
254
255                # Get lmin and lmax for this field pair
256                lmin = self.lmins.get(key, self.lmin)
257                lmax = self.lmaxs.get(key, self.lmax)
258
259                # Get the covariance for this field pair
260                cov = cov_dict.get(key, np.zeros(lmax + 1))
261
262                # Set the appropriate values in the covariance matrix
263                res[i, j, lmin : lmax + 1] = cov[lmin : lmax + 1]
264                # Fill the covariance matrix symmetrically
265                res[j, i] = res[i, j]
266
267        return res

Fill covariance matrix with appropriate spectra.

Computes the covariance matrix given a dictionary of spectra. Returns the covariance matrix of the considered fields, with a shape equal to (num_fields x num_fields x lmax+1). Note that if more than one lmax, or lmin, is specified, there will be null values in the matrices, making them singular. This will be handled in another method.

Arguments:
  • cov_dict (dict): The input dictionary of spectra.
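
For example (hypothetical arrays and instance), with fields ["t", "e"] and lmax=500 the returned array is (2 x 2 x 501), with the off-diagonal blocks filled symmetrically from the "te" spectrum:

    # Sketch: "likelihood" is an already-initialized LiLit instance and the
    # cls_* arrays are hypothetical spectra indexed from ell = 0.
    spectra = {"tt": cls_tt, "ee": cls_ee, "te": cls_te}
    cov = likelihood.cov_filling(spectra)
    # cov.shape == (2, 2, 501); cov[0, 1] and cov[1, 0] hold the "te" spectrum,
    # zeroed outside the [lmin, lmax] range of that pair.
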
def get_keys(self):
269    def get_keys(self):
270        """Extracts the keys that has to be used as a function of the requested fields. These will be the usual 2-points, e.g., tt, te, ee, etc."""
271        # List of all the possible combinations of the requested fields
272        res = [
273            self.fields[i] + self.sep + self.fields[j]
274            for i in range(self.n)
275            for j in range(i, self.n)
276        ]
277        # Print the requested keys
278        if self.debug:
279            print(f"\nThe requested keys are {res}")
280        return res

Extracts the keys that have to be used as a function of the requested fields. These will be the usual 2-point spectra, e.g., tt, te, ee, etc.
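
For instance, with fields ["t", "e", "b"] and sep="" the method returns the six independent 2-point keys in this order:

    # ["tt", "te", "tb", "ee", "eb", "bb"]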

def get_Gauss_keys(self):
282    def get_Gauss_keys(self):
283        """Find the proper dictionary keys for the requested fields.
284
285        Extracts the keys that have to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-point functions, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
286        """
287        # Calculate the number of elements in the covariance matrix
288        n = int(self.n * (self.n + 1) / 2)
289        # Initialize a 3-d array to store the keys
290        res = np.zeros((n, n, 4), dtype=str)
291        # Loop over all the elements in the covariance matrix
292        for i in range(n):
293            for j in range(i, n):
294                # Generate a key for the i-th and j-th element
295                elem = self.keys[i] + self.sep + self.keys[j]
296                # Loop over all the characters in the key
297                for k in range(4):
298                    # Add the k-th character to the i-th, j-th, and k-th
299                    # indices of the array
300                    res[i, j, k] = np.asarray(list(elem)[k])
301                    res[j, i, k] = res[i, j, k]
302        # Print the keys if the debug flag is set
303        if self.debug:
304            print(f"\nThe requested keys are {res}")
305        # Return the keys
306        return res

Find the proper dictionary keys for the requested fields.

Extracts the keys that have to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-point functions, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
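
For instance, with fields ["t", "e"] the 2-point keys are ["tt", "te", "ee"] (so n = 3), and the method returns a (3 x 3 x 4) array of single characters, e.g.:

    # keys[0, 1] -> ['t', 't', 't', 'e']   # the "ttte" element
    # keys[1, 2] -> ['t', 'e', 'e', 'e']   # the "teee" element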

def find_spectrum(self, input_dict, key):
308    def find_spectrum(self, input_dict, key):
309        """Find a spectrum in a given dictionary.
310
311        Returns the corresponding power spectrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.
312
313        Parameters:
314            input_dict (dict):
315                Dictionary where you want to search for keys.
316
317            key (str):
318                Key to search for.
319        """
320        # create a zero array
321        res = np.zeros(self.lmax + 1)
322
323        # get lmin and lmax
324        lmin = self.lmins.get(key, self.lmin)
325        lmax = self.lmaxs.get(key, self.lmax)
326
327        # try to find the key in the dictionary
328        if key in input_dict:
329            cov = input_dict[key]
330        # if the key is not found, try the reverse key
331        else:
332            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))
333
334        # fill the array with the requested spectrum
335        res[lmin : lmax + 1] = cov[lmin : lmax + 1]
336
337        return res

Find a spectrum in a given dictionary.

Returns the corresponding power spectrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.

Arguments:
  • input_dict (dict): Dictionary where you want to search for keys.
  • key (str): Key to search for.
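
A small usage sketch (hypothetical dictionary and instance): the reversed key is used transparently, and the result is zero-padded outside the pair's multipole range:

    import numpy as np

    spectra = {"te": np.ones(501)}  # assume lmax = 500
    cl_et = likelihood.find_spectrum(spectra, "et")
    # cl_et[lmin : 501] == 1; all other entries are zero
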
def sigma(self, keys, fiduDICT, noiseDICT):
339    def sigma(self, keys, fiduDICT, noiseDICT):
340        """Define the covariance matrix for the Gaussian case.
341
342        In the case of a Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate function.
343
344        Parameters:
345            keys (np.ndarray):
346                Keys for the covariance elements.
347
348            fiduDICT (dict):
349                Dictionary with the fiducial spectra.
350
351            noiseDICT (dict):
352                Dictionary with the noise spectra.
353        """
354        # The covariance matrix has to be symmetric.
355        # Given self.n fields, the number of independent spectra is
356        # n = int(self.n * (self.n + 1) / 2), so the Gaussian
357        # covariance is stored in a (n x n x self.lmax+1) ndarray,
358        # with one n x n block per multipole.
359        n = int(self.n * (self.n + 1) / 2)
360        res = np.zeros((n, n, self.lmax + 1))
361        for i in range(n):  # Loop over all combinations of pairs of spectra
362            for j in range(i, n):
363                C_AC = self.find_spectrum(
364                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
365                )  # Find the fiducial spectra for each pair
366                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
367                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
368                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
369                N_AC = self.find_spectrum(
370                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
371                )  # Find the noise spectra for each pair
372                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
373                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
374                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
375                if self.fsky is not None:  # If self.fsky is defined, use the fsky value
376                    res[i, j] = (
377                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
378                    ) / self.fsky
379                else:  # Otherwise, use the fsky values from the input spectra
380                    AC = keys[i, j, 0] + keys[i, j, 2]
381                    BD = keys[i, j, 1] + keys[i, j, 3]
382                    AD = keys[i, j, 0] + keys[i, j, 3]
383                    BC = keys[i, j, 1] + keys[i, j, 2]
384                    AB = keys[i, j, 0] + keys[i, j, 1]
385                    CD = keys[i, j, 2] + keys[i, j, 3]
386                    res[i, j] = (
387                        np.sqrt(self.fskies[AC] * self.fskies[BD])
388                        * (C_AC + N_AC)
389                        * (C_BD + N_BD)
390                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
391                        * (C_AD + N_AD)
392                        * (C_BC + N_BC)
393                    ) / (self.fskies[AB] * self.fskies[CD])
394                res[j, i] = res[i, j]
395        return res

Define the covariance matrix for the Gaussian case.

In the case of a Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate function.

Arguments:
  • keys (np.ndarray): Keys for the covariance elements.
  • fiduDICT (dict): Dictionary with the fiducial spectra.
  • noiseDICT (dict): Dictionary with the noise spectra.
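
For reference, in the single-fsky case the expression implemented above is the standard Gaussian covariance of two (cross-)spectra, with the (2*ell+1) weight applied later in log_likelihood. Writing the signal-plus-noise spectra as C-tilde = C + N:

    \Sigma_\ell^{AB,CD} = \frac{\tilde{C}_\ell^{AC}\,\tilde{C}_\ell^{BD}
                               + \tilde{C}_\ell^{AD}\,\tilde{C}_\ell^{BC}}{f_{\rm sky}},
    \qquad \tilde{C}_\ell^{XY} \equiv C_\ell^{XY} + N_\ell^{XY}.
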
def inv_sigma(self, sigma):
397    def inv_sigma(self, sigma):
398        """Invert the covariance matrix of the Gaussian case.
399
400        Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding row and column.
401
402        Parameters:
403            sigma (np.ndarray):
404                (self.n x self.n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
405        """
406        # Initialize array to store the inverted covariance matrices
407        res = np.zeros(self.lmax + 1, dtype=object)
408
409        # Loop over multipoles
410        for i in range(self.lmax + 1):
411            # Check if matrix is singular
412            COV = sigma[:, :, i]
413            if np.linalg.det(COV) == 0:
414                # Get indices of null diagonal elements
415                idx = np.where(np.diag(COV) == 0)[0]
416                # Remove corresponding rows and columns
417                COV = np.delete(COV, idx, axis=0)
418                COV = np.delete(COV, idx, axis=1)
419            # Invert matrix
420            res[i] = np.linalg.inv(COV)
421        return res[self.lmin :]  # align the indexing with the lmin used to slice the data

Invert the covariance matrix of the Gaussian case.

Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding row and column.

Arguments:
  • sigma (np.ndarray): (self.n x self.n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
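
Indexing sketch (consistent with the lmin alignment above):

    # After initialize(), with like="gaussian":
    # self.sigma2[i] is the inverse covariance at ell = lmin + i,
    # with reduced dimension wherever some spectra are null.
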
def get_reduced_data(self, mat):
423    def get_reduced_data(self, mat):
424        """Find the reduced data eliminating the singularity of the matrix.
425
426        Cuts the row and column corresponding to a zero diagonal value. Indeed, in case of different lmax, or lmin, values for the fields, you will have singular matrices.
427
428        Parameters:
429            mat (np.ndarray):
430                A covariance matrix with possibly null diagonal entries.
431        """
432        # Select the indices corresponding to the zero diagonal
433        idx = np.where(np.diag(mat) == 0)[0]
434        # Delete the rows and columns from the matrix
435        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)

Find the reduced data eliminating the singularity of the matrix.

Cuts the row and column corresponding to a zero diagonal value. Indeed, in case of different lmax, or lmin, values for the fields, you will have singular matrices.

Arguments:
  • mat (np.ndarray): A covariance matrix with possibly null diagonal entries.
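
A toy example of the reduction (hypothetical instance):

    import numpy as np

    # The second row/column has a null diagonal entry and gets cut.
    mat = np.array([[4.0, 0.0, 1.0],
                    [0.0, 0.0, 0.0],
                    [1.0, 0.0, 3.0]])
    reduced = likelihood.get_reduced_data(mat)
    # reduced -> [[4., 1.],
    #             [1., 3.]]
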
def CAMBres2dict(self, camb_results):
437    def CAMBres2dict(self, camb_results):
438        """Takes the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.
439
440        Parameters:
441            camb_results (CAMBdata):
442                CAMB result product from the method get_cmb_power_spectra.
443        """
444        # Build the multipole array from the shape of the CAMB output
445        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
446        # Mapping between the CAMB keys and the ones we want
447        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
448        # Initialize the output dictionary
449        res = {"ell": ls}
450        # Loop over the keys we want
451        for key, i in mapping.items():
452            # Save the results
453            res[key] = camb_results["total"][:, i]
454        # Check if we want the lensing potential
455        if "pp" in self.keys:
456            # Get the lensing potential
457            cl_lens = camb_results.get("lens_potential")
458            # Check if it exists
459            if cl_lens is not None:
460                # Save it
461                res["pp"] = cl_lens[:, 0].copy()
462                # Check if we want the cross terms
463                if "pt" in self.keys and "pe" in self.keys:
464                    # Loop over the cross terms
465                    for i, cross in enumerate(["pt", "pe"]):
466                        # Save the result
467                        res[cross] = cl_lens[:, i + 1].copy()
468                        # Save the symmetric term
469                        res[cross[::-1]] = res[cross]
470        return res

Takes the CAMB result product from get_cmb_power_spectra and converts it to a dictionary with the proper keys.

Arguments:
  • camb_results (CAMBdata): CAMB result product from the method get_cmb_power_spectra.
def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
472    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
473        """Takes a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.
474
475        Parameters:
476            txt (np.ndarray):
477                Array containing the spectra as columns.
478            mapping (dict):
479                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
480        """
481        # Check that a mapping is provided, then define the ell values from the length of the input array
482        assert (
483            mapping is not None
484        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
485        ls = np.arange(txt.shape[0], dtype=np.int64)
486        res = {"ell": ls}
487        # Loop over the mapping and extract the corresponding column from the txt file
488        # and store it in the dictionary under the corresponding keyword
489        for key, i in mapping.items():
490            if apply_ellfactor:
491                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
492            else:
493                res[key] = txt[:, i]
494        return res

Takes the content of a txt file, already loaded as an array, and converts it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.

Arguments:
  • txt (np.ndarray): Array containing the spectra as columns.
  • mapping (dict): Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
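
A usage sketch (hypothetical file and column layout): load the columns with numpy, then map them to keys, optionally applying the ell*(ell+1)/2pi factor:

    import numpy as np

    txt = np.loadtxt("spectra.txt")  # hypothetical path; column 0 = TT, 1 = EE
    res = likelihood.txt2dict(txt, mapping={"tt": 0, "ee": 1}, apply_ellfactor=True)
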
def prod_fidu(self):
496    def prod_fidu(self):
497        """Produce fiducial spectra or read the input ones.
498
499        If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck2018. The extra keywords defined will maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, this method reads and stores that.
500        """
501        # If a custom file is provided, use that
502        if self.cl_file is not None:
503            # If the file is a pickle file, load it
504            if self.cl_file.endswith(".pkl"):
505                with open(self.cl_file, "rb") as pickle_file:
506                    res = pickle.load(pickle_file)
507            # Otherwise, load it as text file
508            else:
509                txt = np.loadtxt(self.cl_file)
510                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
511                res = self.txt2dict(txt, mapping)
512            return res
513
514        try:
515            import camb
516        except ImportError:
517            print("CAMB does not seem to be installed. Check the requirements.")
518            raise  # re-raise, since the fiducial production below needs CAMB
519        # Read the ini file containing the parameters for CAMB
520        path = os.path.dirname(os.path.abspath(__file__))
521        planck_path = os.path.join(path, "planck_2018.ini")
522        pars = camb.read_ini(planck_path)
523
524        if "bb" in self.keys:  # If we want to include the tensor mode
525            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
526            pars.InitPower.set_params(
527                As=2.100549e-9,
528                ns=0.9660499,
529                r=self.r,
530                nt=self.nt,
531                pivot_tensor=self.pivot_t,
532                pivot_scalar=0.05,
533                parameterization=2,
534            )
535            pars.WantTensors = True
536            pars.Accuracy.AccurateBB = True
537        pars.DoLensing = True
538        # pars.Accuracy.AccuracyBoost = 2  # This helps to further improve the agreement between the Cobaya and fiducial spectra
539
540        if self.debug:
541            print(pars)
542
543        results = camb.get_results(pars)
544        res = results.get_cmb_power_spectra(
545            CMB_unit="muK",
546            lmax=self.lmax,
547            raw_cl=False,
548        )
549        return self.CAMBres2dict(res)

Produce fiducial spectra or read the input ones.

If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck2018. The extra keywords defined will maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, this method reads and stores that.
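
If you prefer to supply your own fiducial, here is a sketch of a custom file accepted by the cl_file branch above (keys, units and lmax are illustrative):

    import pickle
    import numpy as np

    # A pickled dictionary keyed like the spectra ("tt", "ee", "bb", ...),
    # indexed from ell = 0, in muK^2 with the ell*(ell+1)/2pi factor applied.
    fiducial = {"bb": np.zeros(301)}  # hypothetical B-mode spectrum, lmax = 300
    with open("fiducial_cls.pkl", "wb") as f:
        pickle.dump(fiducial, f)

    # Then instantiate LiLit with cl_file="fiducial_cls.pkl".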

def prod_noise(self):
551    def prod_noise(self):
552        """Produce noise power spectra or read the input ones.
553
554        If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse noise weighting of white noise in each channel (TT, EE, BB). Note that you may want to have a look at the procedure, since it is merely a placeholder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide the proper custom file, this method reads and stores that.
555        """
556        # If the input noise file is a pickle file, load it.
557        if self.nl_file is not None:
558            if self.nl_file.endswith(".pkl"):
559                with open(self.nl_file, "rb") as pickle_file:
560                    res = pickle.load(pickle_file)
561            # If not, load the file as a text file
562            else:
563                _txt = np.loadtxt(self.nl_file)
564                _mapping = {"bb": 0}
565                # Convert the text file to a dictionary
566                res = self.txt2dict(_txt, _mapping, apply_ellfactor=True)
567            return res
568
569        print(
570            "***WARNING***: the inverse noise weighting performed here severely underestimates "
571            "the actual noise level of LiteBIRD. You should provide an input "
572            "noise power spectrum with a more realistic noise."
573        )
574
575        try:
576            import yaml
577            from yaml.loader import SafeLoader
578            import healpy as hp
579        except ImportError:
580            print("PyYAML or healpy does not seem to be installed. Check the requirements.")
581            raise  # re-raise, since both packages are needed below
582        assert (
583            self.experiment is not None
584        ), "You must specify the experiment you want to consider"
585        print(f"\nComputing noise for {self.experiment}")
586
587        path = os.path.dirname(os.path.abspath(__file__))
588        experiments_path = os.path.join(path, "experiments.yaml")
589        with open(experiments_path) as f:
590            data = yaml.load(f, Loader=SafeLoader)
591
592        # Get the instrument data from the saved data
593        instrument = data[self.experiment]
594
595        # Get the FWHM values from the instrument data
596        fwhms = np.array(instrument["fwhm"])
597
598        # Get the frequency values from the instrument data
599        freqs = np.array(instrument["frequency"])
600
601        # Get the depth values from the instrument data
602        depth_p = np.array(instrument["depth_p"])
603        depth_i = np.array(instrument["depth_i"])
604
605        # Convert the depths from muK*arcmin to muK*radian
606        depth_p /= hp.nside2resol(self.nside, arcmin=True)
607        depth_i /= hp.nside2resol(self.nside, arcmin=True)
608        depth_p = depth_p * np.sqrt(
609            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
610        )
611        depth_i = depth_i * np.sqrt(
612            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
613        )
614
615        # Get the number of frequencies
616        n_freq = len(freqs)
617
618        # Define the ell values as a numpy array
619        ell = np.arange(0, self.lmax + 1, 1)
620
621        # Define the keys for the dictionary that will be returned
622        keys = ["tt", "ee", "bb"]
623
624        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
625        sigma2 = sigma**2
626
627        # Calculate the inverse of the squared Gaussian beam window (exp of +ell*(ell+1)*sigma^2)
628        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])
629
630        # Calculate the polarization factor
631        pol_factor = np.array(
632            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
633        )
634
635        # Exponentiate the (ell-independent) polarization factors
636        pol_factor = np.exp(pol_factor)
637
638        # Calculate the Gaussian beam function for each polarization
639        G = []
640        for i, arr in enumerate(pol_factor):
641            G.append(g * arr[:, np.newaxis])
642        g = np.array(G)
643
644        # Initialize the dictionary that will be returned
645        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}
646
647        # Per-channel inverse noise weights (beam-deconvolved)
648        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
649        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
650        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
651
652        # Combine the channels by inverse noise weighting and apply the ell factor
653        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
654        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
655        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi
656
657        res["tt"][:2] = [0, 0]
658        res["ee"][:2] = [0, 0]
659        res["bb"][:2] = [0, 0]
660
661        return res

Produce noise power spectra or read the input ones.

If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse noise weighting of white noise in each channel (TT, EE, BB). Note that you may want to have a look at the procedure, since it is merely a placeholder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide the proper custom file, this method reads and stores that.
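
Schematically, the placeholder model implemented above combines the channels by inverse-variance weighting after deconvolving each Gaussian beam; for temperature, in the D_ell convention (the polarization channels carry an extra beam factor):

    N_\ell = \frac{\ell(\ell+1)}{2\pi}
             \left[\sum_i \frac{1}{\sigma_{{\rm pix},i}^2\,
             e^{\ell(\ell+1)\sigma_{b,i}^2}}\right]^{-1},
    \qquad \sigma_{b,i} = \frac{{\rm FWHM}_i}{\sqrt{8\ln 2}},

where sigma_pix,i is the channel depth converted to radian units.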

def initialize(self):
663    def initialize(self):
664        """Initializes the fiducial spectra and the noise power spectra."""
665        # Compute the fiducial and noise power spectra
666        self.fiduCLS = self.prod_fidu()
667        self.noiseCLS = self.prod_noise()
668
669        # Compute the covariance matrices
670        self.fiduCOV = self.cov_filling(self.fiduCLS)
671        self.noiseCOV = self.cov_filling(self.noiseCLS)
672
673        # Print some information for debugging
674        if self.debug:
675            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
676            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")
677
678            field = self.keys[0]  # inspect the first requested spectrum
679            print("\nPrinting the first few values to check that it starts from 0...")
680            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
681            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")
682
683        # Compute the total covariance matrix
684        self.data = (
685            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
686            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
687        )
688
689        # Compute the inverse of the covariance matrix
690        if self.like == "gaussian":
691            self.gauss_keys = self.get_Gauss_keys()
692            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
693            self.sigma2 = self.inv_sigma(sigma2)

Initializes the fiducial spectra and the noise power spectra.

def get_requirements(self):
695    def get_requirements(self):
696        """Defines requirements of the likelihood, specifying quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
697        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
698        requirements = {}
699        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
700        # If debug is True, also request the CAMB results object and print the requirements
701        if self.debug:
702            requirements["CAMBdata"] = None
703            print(
704                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
705            )
706        return requirements

Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing.
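
As the docstring suggests, lensing can be excluded by requesting unlensed spectra instead. Below is a minimal sketch of that variant as a hypothetical subclass; it relies on Cobaya's standard "unlensed_Cl" requirement and get_unlensed_Cl provider method, and is not part of LiLit itself.

from lilit import LiLit  # hypothetical import path

class UnlensedLiLit(LiLit):
    def get_requirements(self):
        # Request unlensed CMB spectra from the theory code
        return {"unlensed_Cl": {cl: self.lmax for cl in self.keys}}

    # logp would also need to fetch the matching product, e.g.
    # self.cobaCLs = self.provider.get_unlensed_Cl(ell_factor=True)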

def data_vector(self, cov):
708    def data_vector(self, cov):
709        """Get data vector from the covariance matrix.
710
711        Extracts the data vector necessary for the Gaussian case. Note that this discards any null entries, since some elements may be zero when the fields have different lmax values.
712
713        Parameters:
714            cov (np.ndarray):
715                An ndarray containing the covariance matrices, some of which may be null.
716        """
717        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]

Get data vector from the covariance matrix.

Extracts the data vector necessary for the Gaussian case. Note that this discards any null entries, since some elements may be zero when the fields have different lmax values.

Arguments:
  • cov (np.ndarray): An ndarray containing the covariance matrices, some of which may be null.
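
A toy demonstration of this extraction for a single 3x3 covariance (made-up values): np.triu_indices selects the upper triangle, and the boolean mask then drops the null entries.

import numpy as np

n = 3
cov = np.array([
    [1.0, 0.5, 0.0],  # the zero mimics a cross-spectrum beyond its lmax
    [0.5, 2.0, 0.3],
    [0.0, 0.3, 3.0],
])
upper = cov[np.triu_indices(n)]  # [1.0, 0.5, 0.0, 2.0, 0.3, 3.0]
vector = upper[upper != 0]       # [1.0, 0.5, 2.0, 0.3, 3.0]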
def chi_exact(self, i=0):
719    def chi_exact(self, i=0):
720        """Computes proper chi-square term for the exact likelihood case.
721
722        Parameters:
723            i (int, optional):
724                ell index if needed. Defaults to 0.
725        """
726        # If the number of fields is not equal to 1, we are in the
727        # multi-field case and must handle a full covariance matrix
728        # at each multipole.
729        if self.n != 1:
730            # Extract the theory and data covariance matrices at the
731            # i-th multipole.
732            coba = self.coba[:, :, i]
733            data = self.data[:, :, i]
734            det = np.linalg.det(coba)
735            # If the determinant is equal to 0, then we need to reduce
736            # the dimensionality of the data and covariance matrix.
737            if det == 0:
738                data = self.get_reduced_data(data)
739                coba = self.get_reduced_data(coba)
740            # We compute the matrix M using the covariance matrix and
741            # the data.
742            M = np.linalg.solve(coba, data)
743            # We compute the chi-square term using the trace of M, the
744            # log determinant of M, and the number of fields.
745            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
746        # If the number of fields is equal to 1, the covariance at each
747        # multipole is a scalar, so everything can be computed in a
748        # single vectorized operation.
749        else:
750            # We compute the matrix M using the covariance matrix and
751            # the data.
752            M = self.data / self.coba
753            # We compute the chi-square term using M, the log of M, and
754            # a constant value.
755            return M - np.log(np.abs(M)) - 1

Computes the proper chi-square term for the exact likelihood case.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
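
A quick numerical sanity check of the multi-field branch (toy matrices, not LiLit data): when the theory equals the data, M is the identity, so trace(M) - log|M| - n vanishes, and any mismatch makes the term positive.

import numpy as np

data = np.array([[2.0, 0.1], [0.1, 1.5]])
coba = data.copy()

M = np.linalg.solve(coba, data)  # M = C_theory^{-1} C_data
chi = np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
print(chi)  # ~0.0 up to floating-point error

coba = 1.1 * data  # perturb the theory prediction
M = np.linalg.solve(coba, data)
print(np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0])  # > 0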
def chi_gaussian(self, i=0):
757    def chi_gaussian(self, i=0):
758        """Computes proper chi-square term for the Gaussian likelihood case.
759
760        Parameters:
761            i (int, optional):
762                ell index if needed. Defaults to 0.
763        """
764        # If we have more than one data vector
765        if self.n != 1:
766            coba = self.data_vector(self.coba[:, :, i])
767            data = self.data_vector(self.data[:, :, i])
768            return (coba - data) @ self.sigma2[i] @ (coba - data)
769        # If we have only one data vector
770        else:
771            coba = self.coba[0, 0, :]
772            data = self.data[0, 0, :]
773            res = (coba - data) * self.sigma2 * (coba - data)
774            return res

Computes the proper chi-square term for the Gaussian likelihood case.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
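
The multi-field branch is the usual quadratic form (x - mu)^T Sigma^{-1} (x - mu), with sigma2 playing the role of the precomputed inverse covariance. A toy example with made-up numbers:

import numpy as np

coba = np.array([1.05, 2.10, 0.95])  # theory data vector at one ell
data = np.array([1.00, 2.00, 1.00])  # fiducial data vector
inv_sigma = np.linalg.inv(np.diag([0.1, 0.2, 0.1]))

chi2 = (coba - data) @ inv_sigma @ (coba - data)
print(chi2)  # 0.1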
def compute_chi_part(self, i=0):
776    def compute_chi_part(self, i=0):
777        """Chooses which chi-square term to compute.
778
779        Parameters:
780            i (int, optional):
781                ell index if needed. Defaults to 0.
782        """
783        # check if the likelihood is "exact"
784        if self.like == "exact":
785            # if so, compute the chi-square term for the exact likelihood
786            return self.chi_exact(i)
787        # if not, check if it is "gaussian"
788        elif self.like == "gaussian":
789            # if so, compute the chi-square term for the gaussian likelihood
790            return self.chi_gaussian(i)
791        # if neither, print an error message
792        else:
793            print("You requested something different from 'exact' or 'gaussian'!")
794            return

Chooses which chi-square term to compute.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
def log_likelihood(self):
796    def log_likelihood(self):
797        """Computes the log likelihood."""
798        # Get the array of multipoles
799        ell = np.arange(self.lmin, self.lmax + 1, 1)
800        # Compute the log likelihood for each multipole
801        if self.n != 1:
802            logp_ℓ = np.zeros(ell.shape)
803            for i in range(0, self.lmax + 1 - self.lmin):
804                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
805        else:
806            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
807        # Sum the log likelihood over multipoles
808        return np.sum(logp_ℓ)

Computes the log likelihood.
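
The (2*ell + 1) factor weights each chi-square term by the number of m-modes available at that multipole. A toy illustration of the sum, with made-up chi values:

import numpy as np

lmin, lmax = 2, 6
ell = np.arange(lmin, lmax + 1)
chi = np.array([0.10, 0.05, 0.20, 0.02, 0.08])  # one term per multipole

logp = np.sum(-0.5 * (2 * ell + 1) * chi)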

def logp(self, **params_values):
810    def logp(self, **params_values):
811        """Gets the log likelihood and pass it to Cobaya to carry on the MCMC process."""
812        if self.debug:
813            CAMBdata = self.provider.get_CAMBdata()
814            pars = CAMBdata.Params
815            print(pars)
816
817        # Get the Cls from Cobaya
818        self.cobaCLs = self.provider.get_Cl(ell_factor=True)
819
820        if self.debug:
821            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")
822
823            field = "bb"
824            print("\nPrinting the first few values to check that it starts from 0...")
825            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")
826
827        # Fill the covariance matrix with the Cls from Cobaya
828        self.cobaCOV = self.cov_filling(self.cobaCLs)
829
830        if self.debug:
831            ell = np.arange(0, self.lmax + 1, 1)
832            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
833            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
834            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
835            plt.xlim(2, None)
836            plt.legend()
837            plt.show()
838
839        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
840        self.coba = (
841            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
842            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
843        )
844
845        # Compute the likelihood
846        logp = self.log_likelihood()
847
848        if self.debug:
849            print(logp)
850            exit()
851
852        return logp

Gets the log likelihood and passes it to Cobaya to carry on the MCMC process.
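
For completeness, a minimal usage sketch is shown below. It assumes LiLit is importable as shown and that CAMB is installed; the likelihood arguments, parameter block, and prior are illustrative placeholders, not a recommended LiteBIRD setup.

from cobaya.run import run
from lilit import LiLit  # hypothetical import path

info = {
    "likelihood": {
        "lilit_bb": LiLit(name="lilit_bb", fields=["b"], lmax=500, r=0.01),
    },
    "theory": {"camb": None},
    "params": {
        "r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"},
    },
    "sampler": {"mcmc": None},
}

updated_info, sampler = run(info)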
