Permalinks (SWHIDs)

  • content: swh:1:cnt:f6bb0e3d9a062fe7bc93c86a4bd118f8d737c82b
  • directory: swh:1:dir:9d0b5cd7699c2f0cf54ad2c24d62cb2ef96a3df3
_tucker.py
import numpy as np
from ..base import unfold
from ..tenalg import mode_dot, norm, partial_svd
from ..tucker import tucker_to_tensor
from ..utils import check_random_state

# Author: Jean Kossaifi <jean.kossaifi+tensors@gmail.com>


def tucker(tensor, ranks=None, n_iter_max=100, init='svd', tol=10e-5,
           random_state=None, verbose=False):
    """Tucker decomposition via Higher Order Orthogonal Iteration (HOI)

        Decomposes `tensor` into a Tucker decomposition:
        ``tensor = [| core; factors[0], ...factors[-1] |]``

    Parameters
    ----------
    tensor : ndarray
    ranks : None or int list
            size of the core tensor, ``(len(ranks) == tensor.ndim)``
    n_iter_max : int
                 maximum number of iterations
    init : {'svd', 'random'}, optional
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity

    Returns
    -------
    core : ndarray of size `ranks`
            core tensor of the Tucker decomposition
    factors : ndarray list
            list of factors of the Tucker decomposition.
            Its ``i``-th element is of shape ``(tensor.shape[i], ranks[i])``
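
    Examples
    --------
    Minimal usage sketch (illustrative only; assumes a NumPy backend and
    that this module's relative imports resolve within its parent package):

    >>> import numpy as np
    >>> tensor = np.random.random_sample((4, 5, 6))
    >>> core, factors = tucker(tensor, ranks=[2, 3, 3])
    >>> core.shape
    (2, 3, 3)
    >>> [f.shape for f in factors]
    [(4, 2), (5, 3), (6, 3)]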
    """
    if ranks is None:
        ranks = list(tensor.shape)

    # SVD init
    if init == 'svd':
        factors = []
        for mode in range(tensor.ndim):
            eigenvecs, _, _ = partial_svd(unfold(tensor, mode), n_eigenvecs=ranks[mode])
            factors.append(eigenvecs)
    else:
        rng = check_random_state(random_state)
        core = rng.random_sample(ranks)
        factors = [rng.random_sample((dim, rank))
                   for dim, rank in zip(tensor.shape, ranks)]

    rec_errors = []
    norm_tensor = norm(tensor, 2)

    for iteration in range(n_iter_max):
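        # HOOI sweep: for each mode, contract the tensor with the (transposed)
        # factors along every other mode, then update this mode's factor with
        # the leading left singular vectors of the unfolded result.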
        for mode in range(tensor.ndim):
            core_approximation = tucker_to_tensor(tensor, factors, skip_factor=mode, transpose_factors=True)
            eigenvecs, _, _ = partial_svd(unfold(core_approximation, mode), n_eigenvecs=ranks[mode])
            factors[mode] = eigenvecs

        core = tucker_to_tensor(tensor, factors, transpose_factors=True)

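        # With orthonormal factors, ||tensor - reconstruction||^2 =
        # ||tensor||^2 - ||core||^2, so the relative reconstruction error can
        # be computed without forming the full reconstruction
        # (cf. the commented-out alternative below).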
        rec_error = np.sqrt(norm_tensor**2 - norm(core, 2)**2) / norm_tensor
        #rec_error = norm(tensor - tucker_to_tensor(core, factors), 2) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break

    return core, factors


def non_negative_tucker(tensor, ranks, n_iter_max=10, init='svd', tol=10e-5,
                        random_state=None, verbose=False):
    """Non-negative Tucker decomposition

        Iterative multiplicative update, see [1]_, [2]_

    Parameters
    ----------
    tensor : ndarray
    ranks : int list
            size of the core tensor, ``(len(ranks) == tensor.ndim)``
    n_iter_max : int
                 maximum number of iterations
    init : {'svd', 'random'}
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity

    Returns
    -------
    core : ndarray
            positive core of the Tucker decomposition,
            of shape `ranks`
    factors : ndarray list
            list of factors of the Tucker decomposition;
            element `i` is of shape ``(tensor.shape[i], ranks[i])``

    References
    ----------
    .. [1] G. Zhou, A. Cichocki, Q. Zhao and S. Xie,
       "Nonnegative Matrix and Tensor Factorizations : An algorithmic perspective,"
       in IEEE Signal Processing Magazine, vol. 31, no. 3, pp. 54-65, May 2014.

    .. [2] D. D. Lee and H. S. Seung,
       "Algorithms for non-negative matrix factorization",
       in Advances in neural information processing systems (NIPS), 2001
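
    Examples
    --------
    Minimal usage sketch (illustrative only; same assumptions as for
    `tucker` above):

    >>> import numpy as np
    >>> tensor = np.random.random_sample((4, 5, 6))
    >>> core, factors = non_negative_tucker(tensor, ranks=[2, 3, 3])
    >>> (core >= 0).all() and all((f >= 0).all() for f in factors)
    True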
    """
    epsilon = 10e-12

    # Initialisation
    if init == 'svd':
        core, factors = tucker(tensor, ranks)
        nn_factors = [np.abs(f) for f in factors]
        nn_core = np.abs(core)
    else:
        rng = check_random_state(random_state)
        core = rng.random_sample(ranks) + 0.01  # small offset keeps the core strictly positive
        factors = [rng.random_sample((dim, rank))
                   for dim, rank in zip(tensor.shape, ranks)]
        nn_factors = [np.abs(f) for f in factors]
        nn_core = np.abs(core)

    norm_tensor = norm(tensor, 2)
    rec_errors = []

    for iteration in range(n_iter_max):
        for mode in range(tensor.ndim):
            B = tucker_to_tensor(nn_core, nn_factors, skip_factor=mode)
            B = unfold(B, mode).T

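            # Lee-Seung style multiplicative update for this factor:
            # U <- U * (X_(mode) @ B) / (U @ (B^T @ B)), elementwise, with
            # numerator and denominator clipped away from zero for stability.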
            numerator = np.dot(unfold(tensor, mode), B)
            numerator = numerator.clip(min=epsilon)
            denominator = np.dot(nn_factors[mode], B.T.dot(B))
            denominator = denominator.clip(min=epsilon)
            nn_factors[mode] *= numerator / denominator

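        # Multiplicative update for the core: the numerator contracts the data
        # tensor with the transposed factors; the denominator contracts the
        # current core with each Gram matrix f.T @ f, mode by mode.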
        numerator = tucker_to_tensor(tensor, nn_factors, transpose_factors=True)
        numerator = numerator.clip(min=epsilon)
        for i, f in enumerate(nn_factors):
            if i:
                denominator = mode_dot(denominator, f.T.dot(f), i)
            else:
                denominator = mode_dot(nn_core, f.T.dot(f), i)
        denominator = denominator.clip(min=epsilon)
        nn_core *= numerator / denominator

        rec_error = norm(tensor - tucker_to_tensor(nn_core, nn_factors), 2) / norm_tensor
        rec_errors.append(rec_error)
        if iteration > 1 and verbose:
            print('reconstruction error={}, variation={}.'.format(
                rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

        if iteration > 1 and abs(rec_errors[-2] - rec_errors[-1]) < tol:
            if verbose:
                print('converged in {} iterations.'.format(iteration))
            break

    return nn_core, nn_factors
