Source code for gensim.models.word2vec

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html


"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.

The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.

For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/

**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).

Initialize a model with e.g.::

>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)

Persist a model to disk with::

>>> model.save(fname)
>>> model = Word2Vec.load(fname)  # you can continue training with the loaded model!

The model can also be instantiated from an existing file on disk in the word2vec C format::

  >>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False)  # C text format
  >>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True)  # C binary format

You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::

  >>> model.most_similar(positive=['woman', 'king'], negative=['man'])
  [('queen', 0.50882536), ...]

  >>> model.doesnt_match("breakfast cereal dinner lunch".split())
  'cereal'

  >>> model.similarity('woman', 'man')
  0.73723527

  >>> model['computer']  # raw numpy vector of a word
  array([-0.00449447, -0.00310097,  0.02421786, ...], dtype=float32)

and so on.

If you're finished training a model (=no more updates, only querying), you can do

  >>> model.init_sims(replace=True)

to trim unneeded model memory and use (much) less RAM.

Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:

>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
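
A rough end-to-end sketch (assuming a plain-text corpus at the hypothetical path
`/tmp/corpus.txt`, with one whitespace-delimited sentence per line) that streams
sentences straight from disk:

>>> sentences = LineSentence('/tmp/corpus.txt')  # memory-friendly iterator over the file
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> model.most_similar('computer')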

.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
       In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division  # py3 "true division"

import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools

from gensim.utils import keep_vocab_item

try:
    from queue import Queue, Empty
except ImportError:
    from Queue import Queue, Empty

from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
    double, uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
    ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray

from gensim import utils, matutils  # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType

logger = logging.getLogger(__name__)

try:
    from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
    from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
    from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
    logger.debug('Fast version of {0} is being used'.format(__name__))
except ImportError:
    # failed... fall back to plain numpy (20-80x slower training than the above)
    logger.warning('Slow version of {0} is being used'.format(__name__))
    FAST_VERSION = -1
    MAX_WORDS_IN_BATCH = 10000

    def train_batch_sg(model, sentences, alpha, work=None):
        """
        Update skip-gram model by training on a sequence of sentences.

        Each sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary. Called internally from `Word2Vec.train()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        """
        result = 0
        for sentence in sentences:
            word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
                           model.vocab[w].sample_int > model.random.rand() * 2**32]
            for pos, word in enumerate(word_vocabs):
                reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code

                # now go over all words from the (reduced) window, predicting each one in turn
                start = max(0, pos - model.window + reduced_window)
                for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
                    # don't train on the `word` itself
                    if pos2 != pos:
                        train_sg_pair(model, model.index2word[word.index], word2.index, alpha)
            result += len(word_vocabs)
        return result

    def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
        """
        Update CBOW model by training on a sequence of sentences.

        Each sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary. Called internally from `Word2Vec.train()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        """
        result = 0
        for sentence in sentences:
            word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
                           model.vocab[w].sample_int > model.random.rand() * 2**32]
            for pos, word in enumerate(word_vocabs):
                reduced_window = model.random.randint(model.window)  # `b` in the original word2vec code
                start = max(0, pos - model.window + reduced_window)
                window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
                word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
                l1 = np_sum(model.syn0[word2_indices], axis=0)  # 1 x vector_size
                if word2_indices and model.cbow_mean:
                    l1 /= len(word2_indices)
                train_cbow_pair(model, word, word2_indices, l1, alpha)
            result += len(word_vocabs)
        return result

    def score_sentence_sg(model, sentence, work=None):
        """
        Obtain likelihood score for a single sentence in a fitted skip-gram representation.

        The sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary (out-of-vocabulary words are skipped). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        """

        log_prob_sentence = 0.0
        if model.negative:
            raise RuntimeError("scoring is only available for HS=True")

        word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
        for pos, word in enumerate(word_vocabs):
            if word is None:
                continue  # OOV word in the input sentence => skip

            # now go over all words from the window, predicting each one in turn
            start = max(0, pos - model.window)
            for pos2, word2 in enumerate(word_vocabs[start : pos + model.window + 1], start):
                # don't train on OOV words and on the `word` itself
                if word2 is not None and pos2 != pos:
                    log_prob_sentence += score_sg_pair(model, word, word2)

        return log_prob_sentence

    def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
        """
        Obtain likelihood score for a single sentence in a fitted CBOW representation.

        The sentence is a list of string tokens, which are looked up in the model's
        vocab dictionary (out-of-vocabulary words are skipped). Called internally from `Word2Vec.score()`.

        This is the non-optimized, Python version. If you have cython installed, gensim
        will use the optimized version from word2vec_inner instead.

        """
        log_prob_sentence = 0.0
        if model.negative:
            raise RuntimeError("scoring is only available for HS=True")

        word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
        for pos, word in enumerate(word_vocabs):
            if word is None:
                continue  # OOV word in the input sentence => skip

            start = max(0, pos - model.window)
            window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
            word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
            l1 = np_sum(model.syn0[word2_indices], axis=0)  # 1 x layer1_size
            if word2_indices and model.cbow_mean:
                l1 /= len(word2_indices)
            log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)

        return log_prob_sentence

# If pyemd C extension is available, import it.
# If an attempt is made to use pyemd when it isn't installed, an ImportError will be raised.
try:
    from pyemd import emd
    PYEMD_EXT = True
except ImportError:
    PYEMD_EXT = False
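
# Illustrative sketch (not part of the original module flow): with pyemd installed, a trained
# model can compute Word Mover's Distance between two tokenized documents, e.g.
#
#   >>> distance = model.wmdistance('obama speaks media'.split(), 'president greets press'.split())
#
# Without pyemd, calling `wmdistance()` is what triggers the ImportError mentioned above.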

def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
                  context_vectors=None, context_locks=None):
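    """
    Train a single skip-gram pair: the word at `context_index` is the network input, the
    string `word` is the word to be predicted. Using hierarchical softmax and/or negative
    sampling (whichever the model enables), update the hidden->output weights (`learn_hidden`)
    and the input vector (`learn_vectors`, scaled by its lock factor), and return the
    accumulated input-side error `neu1e`. `context_vectors`/`context_locks` default to the
    model's own `syn0`/`syn0_lockf`, but callers may pass other vector arrays to train.
    """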
    if context_vectors is None:
        context_vectors = model.syn0
    if context_locks is None:
        context_locks = model.syn0_lockf

    if word not in model.vocab:
        return
    predict_word = model.vocab[word]  # target word (NN output)

    l1 = context_vectors[context_index]  # input word (NN input/projection layer)
    lock_factor = context_locks[context_index]

    neu1e = zeros(l1.shape)

    if model.hs:
        # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
        l2a = deepcopy(model.syn1[predict_word.point])  # 2d matrix, codelen x layer1_size
        fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
        ga = (1 - predict_word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[predict_word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error

    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [predict_word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != predict_word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

    if learn_vectors:
        l1 += neu1e * lock_factor  # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
    return neu1e


def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
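    """
    Train a single CBOW example: `l1` is the combined (summed or averaged) projection of the
    context words listed in `input_word_indices`, and `word` is the target Vocab entry.
    Update the hidden->output weights (`learn_hidden`) and, if `learn_vectors`, spread the
    error back over every context word's input vector (scaled by its lock factor).
    Return the accumulated error vector `neu1e`.
    """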
    neu1e = zeros(l1.shape)

    if model.hs:
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        fa = 1. / (1. + exp(-dot(l1, l2a.T)))  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error

    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = 1. / (1. + exp(-dot(l1, l2b.T)))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if not model.cbow_mean and input_word_indices:
            neu1e /= len(input_word_indices)
        for i in input_word_indices:
            model.syn0[i] += neu1e * model.syn0_lockf[i]

    return neu1e


def score_sg_pair(model, word, word2):
    l1 = model.syn0[word2.index]
    l2a = deepcopy(model.syn1[word.point])  # 2d matrix, codelen x layer1_size
    sgn = (-1.0)**word.code  # ch function: 0 -> 1, 1 -> -1
    lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
    return sum(lprob)


def score_cbow_pair(model, word, word2_indices, l1):
    l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
    sgn = (-1.0)**word.code  # ch function: 0 -> 1, 1 -> -1
    lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
    return sum(lprob)
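
# Illustrative sketch (hypothetical call): the two pair-scorers above back `Word2Vec.score()`,
# which is only defined for hierarchical-softmax models (hs=1, negative=0) and returns the
# log probability of each supplied sentence, e.g.
#
#   >>> log_probs = model.score(['the quick brown fox jumps'.split()])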


class Vocab(object):
    """
    A single vocabulary item, used internally for collecting per-word frequency/sampling info,
    and for constructing binary trees (incl. both word leaves and inner nodes).

    """
    def __init__(self, **kwargs):
        self.count = 0
        self.__dict__.update(kwargs)

    def __lt__(self, other):  # used for sorting in a priority queue
        return self.count < other.count

    def __str__(self):
        vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))


class Word2Vec(utils.SaveLoad):
    """
    Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/

    The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
    compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.

    """

    def __init__(
            self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
            max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
            sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
            trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH):
        """
        Initialize the model from an iterable of `sentences`. Each sentence is a
        list of words (unicode strings) that will be used for training.

        The `sentences` iterable can be simply a list, but for larger corpora,
        consider an iterable that streams the sentences directly from disk/network.
        See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
        this module for such examples.

        If you don't supply `sentences`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.

        `sg` defines the training algorithm. By default (`sg=0`), CBOW is used.
        Otherwise (`sg=1`), skip-gram is employed.

        `size` is the dimensionality of the feature vectors.

        `window` is the maximum distance between the current and predicted word within a sentence.

        `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).

        `seed` = for the random number generator. Initial vectors for each
        word are seeded with a hash of the concatenation of word + str(seed).
        Note that for a fully deterministically-reproducible run, you must also limit the model to
        a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
        3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
        environment variable to control hash randomization.)

        `min_count` = ignore all words with total frequency lower than this.

        `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types
        need about 1GB of RAM. Set to `None` for no limit (default).

        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
        default is 1e-3, useful range is (0, 1e-5).

        `workers` = use this many worker threads to train the model (=faster training with multicore machines).

        `hs` = if 1, hierarchical softmax will be used for model training.
        If set to 0 (default), and `negative` is non-zero, negative sampling will be used.

        `negative` = if > 0, negative sampling will be used, the int for negative
        specifies how many "noise words" should be drawn (usually between 5-20).
        Default is 5. If set to 0, no negative sampling is used.

        `cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.
        Only applies when cbow is used.

        `hashfxn` = hash function to use to randomly initialize weights, for increased
        training reproducibility. Default is Python's rudimentary built-in hash function.

        `iter` = number of iterations (epochs) over the corpus. Default is 5.

        `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
        in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
        Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count)
        and returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored
        as part of the model.

        `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
        assigning word indexes.

        `batch_words` = target size (in words) for batches of examples passed to worker threads (and
        thus cython routines). Default is 10000. (Larger batches will be passed if individual
        texts are longer than 10000 words, but the standard cython code truncates to that maximum.)

        """
        self.vocab = {}  # mapping from a word (string) to a Vocab object
        self.index2word = []  # map from a word's matrix index (int) to word (string)
        self.sg = int(sg)
        self.cum_table = None  # for negative sampling
        self.vector_size = int(size)
        self.layer1_size = int(size)
        if size % 4 != 0:
            logger.warning("consider setting layer size to a multiple of 4 for greater performance")
        self.alpha = float(alpha)
        self.min_alpha_yet_reached = float(alpha)  # To warn user if alpha increases
        self.window = int(window)
        self.max_vocab_size = max_vocab_size
        self.seed = seed
        self.random = random.RandomState(seed)
        self.min_count = min_count
        self.sample = sample
        self.workers = int(workers)
        self.min_alpha = float(min_alpha)
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        self.hashfxn = hashfxn
        self.iter = iter
        self.null_word = null_word
        self.train_count = 0
        self.total_train_time = 0
        self.sorted_vocab = sorted_vocab
        self.batch_words = batch_words
        if sentences is not None:
            if isinstance(sentences, GeneratorType):
                raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
            self.build_vocab(sentences, trim_rule=trim_rule)
            self.train(sentences)
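
    # Illustrative sketch (not part of the original source): passing `sentences` to the
    # constructor above is equivalent to the explicit two-step form, which is convenient
    # when vocabulary building and training should happen at different times:
    #
    #   >>> model = Word2Vec(size=100, window=5, min_count=5, workers=4)  # uninitialized model
    #   >>> model.build_vocab(sentences)   # one pass over the corpus to collect the vocabulary
    #   >>> model.train(sentences)         # further pass(es) to train the vectors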

    def make_cum_table(self, power=0.75, domain=2**31 - 1):
        """
        Create a cumulative-distribution table using stored vocabulary word counts for
        drawing random words in the negative-sampling training routines.

        To draw a word index, choose a random integer up to the maximum value in the
        table (cum_table[-1]), then find that integer's sorted insertion point (as if
        by bisect_left or ndarray.searchsorted()). That insertion point is the drawn
        index, coming up in proportion equal to the increment at that slot.

        Called internally from 'build_vocab()'.

        """
        vocab_size = len(self.index2word)
        self.cum_table = zeros(vocab_size, dtype=uint32)
        # compute sum of all power (Z in paper)
        train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
        cumulative = 0.0
        for word_index in range(vocab_size):
            cumulative += self.vocab[self.index2word[word_index]].count**power / train_words_pow
            self.cum_table[word_index] = round(cumulative * domain)
        if len(self.cum_table) > 0:
            assert self.cum_table[-1] == domain
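
    # Illustrative sketch: a single negative-sample draw from the finished table, exactly as
    # done in train_sg_pair()/train_cbow_pair() above:
    #
    #   >>> draw = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
    #   >>> sampled_word = model.index2word[draw]  # frequent words come up proportionally more often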

    def create_binary_tree(self):
        """
        Create a binary Huffman tree using stored vocabulary word counts. Frequent words
        will have shorter binary codes. Called internally from `build_vocab()`.

        """
        logger.info("constructing a huffman tree from %i words", len(self.vocab))

        # build the huffman tree
        heap = list(itervalues(self.vocab))
        heapq.heapify(heap)
        for i in xrange(len(self.vocab) - 1):
            min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
            heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))

        # recurse over the tree, assigning a binary code to each vocabulary word
        if heap:
            max_depth, stack = 0, [(heap[0], [], [])]
            while stack:
                node, codes, points = stack.pop()
                if node.index < len(self.vocab):
                    # leaf node => store its path from the root
                    node.code, node.point = codes, points
                    max_depth = max(len(codes), max_depth)
                else:
                    # inner node => continue recursion
                    points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
                    stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
                    stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))

            logger.info("built huffman tree with maximum node depth %i", max_depth)
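
    # Illustrative sketch: after `build_vocab()` on a model with hs=1, each in-vocabulary word
    # carries its Huffman path (assuming 'computer' survived min_count):
    #
    #   >>> model.vocab['computer'].code   # array of 0/1 branch choices from the root
    #   >>> model.vocab['computer'].point  # indexes of the inner nodes (rows of syn1) along that path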

    def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000):
        """
        Build vocabulary from a sequence of sentences (can be a once-only generator stream).
        Each sentence must be a list of unicode strings.

        """
        self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule)  # initial survey
        self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule)  # trim by min_count & precalculate downsampling
        self.finalize_vocab()  # build tables & arrays

    def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
        """Do an initial scan of all words appearing in sentences."""
        logger.info("collecting all words and their counts")
        sentence_no = -1
        total_words = 0
        min_reduce = 1
        vocab = defaultdict(int)
        checked_string_types = 0
        for sentence_no, sentence in enumerate(sentences):
            if not checked_string_types:
                if isinstance(sentence, string_types):
                    logger.warn("Each 'sentences' item should be a list of words (usually unicode strings). "
                                "First item here is instead plain %s.", type(sentence))
                checked_string_types += 1
            if sentence_no % progress_per == 0:
                logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
                            sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
            for word in sentence:
                vocab[word] += 1

            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1

        total_words += sum(itervalues(vocab))
        logger.info("collected %i word types from a corpus of %i raw words and %i sentences",
                    len(vocab), total_words, sentence_no + 1)
        self.corpus_count = sentence_no + 1
        self.raw_vocab = vocab

    def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None):
        """
        Apply vocabulary settings for `min_count` (discarding less-frequent words)
        and `sample` (controlling the downsampling of more-frequent words).

        Calling with `dry_run=True` will only simulate the provided settings and
        report the size of the retained vocabulary, effective corpus length, and
        estimated memory requirements. Results are both printed via logging and
        returned as a dict.

        Delete the raw vocabulary after the scaling is done to free up RAM,
        unless `keep_raw_vocab` is set.

        """
        min_count = min_count or self.min_count
        sample = sample or self.sample

        # Discard words less-frequent than min_count
        if not dry_run:
            self.index2word = []
            # make stored settings match these applied settings
            self.min_count = min_count
            self.sample = sample
            self.vocab = {}
        drop_unique, drop_total, retain_total, original_total = 0, 0, 0, 0
        retain_words = []
        for word, v in iteritems(self.raw_vocab):
            if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
                retain_words.append(word)
                retain_total += v
                original_total += v
                if not dry_run:
                    self.vocab[word] = Vocab(count=v, index=len(self.index2word))
                    self.index2word.append(word)
            else:
                drop_unique += 1
                drop_total += v
                original_total += v
        logger.info("min_count=%d retains %i unique words (drops %i)",
                    min_count, len(retain_words), drop_unique)
        logger.info("min_count leaves %i word corpus (%i%% of original %i)",
                    retain_total, retain_total * 100 / max(original_total, 1), original_total)

        # Precalculate each vocabulary item's threshold for sampling
        if not sample:
            # no words downsampled
            threshold_count = retain_total
        elif sample < 1.0:
            # traditional meaning: set parameter as proportion of total
            threshold_count = sample * retain_total
        else:
            # new shorthand: sample >= 1 means downsample all words with higher count than sample
            threshold_count = int(sample * (3 + sqrt(5)) / 2)

        downsample_total, downsample_unique = 0, 0
        for w in retain_words:
            v = self.raw_vocab[w]
            word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
            if word_probability < 1.0:
                downsample_unique += 1
                downsample_total += word_probability * v
            else:
                word_probability = 1.0
                downsample_total += v
            if not dry_run:
                self.vocab[w].sample_int = int(round(word_probability * 2**32))

        if not dry_run and not keep_raw_vocab:
            logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
            self.raw_vocab = defaultdict(int)

        logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
        logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
                    downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)

        # return from each step: words-affected, resulting-corpus-size
        report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
                         'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}

        # print extra memory estimates
        report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))

        return report_values
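
    # Illustrative sketch: `dry_run=True` makes it cheap to explore min_count/sample trade-offs
    # after scan_vocab(), without modifying the model:
    #
    #   >>> report = model.scale_vocab(min_count=10, sample=1e-4, dry_run=True)
    #   >>> report['retain_total'], report['downsample_total'], report['memory']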

    def finalize_vocab(self):
        """Build tables and model weights based on final vocabulary settings."""
        if not self.index2word:
            self.scale_vocab()
        if self.sorted_vocab:
            self.sort_vocab()
        if self.hs:
            # add info about each word's Huffman encoding
            self.create_binary_tree()
        if self.negative:
            # build the table for drawing random words (for negative sampling)
            self.make_cum_table()
        if self.null_word:
            # create null pseudo-word for padding when using concatenative L1 (run-of-words)
            # this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
            word, v = '\0', Vocab(count=1, sample_int=0)
            v.index = len(self.vocab)
            self.index2word.append(word)
            self.vocab[word] = v
        # set initial input/projection and hidden weights
        self.reset_weights()

    def sort_vocab(self):
        """Sort the vocabulary so the most frequent words have the lowest indexes."""
        if hasattr(self, 'syn0'):
            raise RuntimeError("must sort before initializing vectors/weights")
        self.index2word.sort(key=lambda word: self.vocab[word].count, reverse=True)
        for i, word in enumerate(self.index2word):
            self.vocab[word].index = i

    def reset_from(self, other_model):
        """
        Borrow shareable pre-built structures (like vocab) from the other_model. Useful
        if testing multiple models in parallel on the same corpus.

        """
        self.vocab = other_model.vocab
        self.index2word = other_model.index2word
        self.cum_table = other_model.cum_table
        self.corpus_count = other_model.corpus_count
        self.reset_weights()
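
    # Illustrative sketch (hypothetical variable names): reuse one vocabulary scan across several
    # differently-parameterized models:
    #
    #   >>> base = Word2Vec(size=100, min_count=5)   # no sentences => left uninitialized
    #   >>> base.build_vocab(sentences)
    #   >>> variant = Word2Vec(size=100, sg=1)       # same corpus, different training algorithm
    #   >>> variant.reset_from(base)                 # borrow vocab, cum_table, corpus_count
    #   >>> variant.train(sentences)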

    def _do_train_job(self, sentences, alpha, inits):
        """
        Train a single batch of sentences. Return 2-tuple `(effective word count after
        ignoring unknown words and sentence length trimming, total word count)`.

        """
        work, neu1 = inits
        tally = 0
        if self.sg:
            tally += train_batch_sg(self, sentences, alpha, work)
        else:
            tally += train_batch_cbow(self, sentences, alpha, work, neu1)
        return tally, self._raw_word_count(sentences)

    def _raw_word_count(self, job):
        """Return the number of words in a given job."""
        return sum(len(sentence) for sentence in job)

    def train(self, sentences, total_words=None, word_count=0,
              total_examples=None, queue_factor=2, report_delay=1.0):
        """
        Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
        For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)

        To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples
        (count of sentences) or total_words (count of raw words in sentences) should be provided, unless
        the sentences are the same as those that were used to initially build the vocabulary.

        """
        if FAST_VERSION < 0:
            import warnings
            warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
                          "Install a C compiler and reinstall gensim for fast training.")
            self.neg_labels = []
            if self.negative > 0:
                # precompute negative labels optimization for pure-python training
                self.neg_labels = zeros(self.negative + 1)
                self.neg_labels[0] = 1.

        logger.info(
            "training model with %i workers on %i vocabulary and %i features, "
            "using sg=%s hs=%s sample=%s negative=%s",
            self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative)

        if not self.vocab:
            raise RuntimeError("you must first build vocabulary before training the model")
        if not hasattr(self, 'syn0'):
            raise RuntimeError("you must first finalize vocabulary before training the model")

        if total_words is None and total_examples is None:
            if self.corpus_count:
                total_examples = self.corpus_count
                logger.info("expecting %i sentences, matching count from corpus used for vocabulary survey", total_examples)
            else:
                raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations")

        job_tally = 0

        if self.iter > 1:
            sentences = utils.RepeatCorpusNTimes(sentences, self.iter)
            total_words = total_words and total_words * self.iter
            total_examples = total_examples and total_examples * self.iter

        def worker_loop():
            """Train the model, lifting lists of sentences from the job_queue."""
            work = matutils.zeros_aligned(self.layer1_size, dtype=REAL)  # per-thread private work memory
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
            jobs_processed = 0
            while True:
                job = job_queue.get()
                if job is None:
                    progress_queue.put(None)
                    break  # no more jobs => quit this worker
                sentences, alpha = job
                tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))
                progress_queue.put((len(sentences), tally, raw_tally))  # report back progress
                jobs_processed += 1
            logger.debug("worker exiting, processed %i jobs", jobs_processed)

        def job_producer():
            """Fill jobs queue using the input `sentences` iterator."""
            job_batch, batch_size = [], 0
            pushed_words, pushed_examples = 0, 0
            next_alpha = self.alpha
            if next_alpha > self.min_alpha_yet_reached:
                logger.warn("Effective 'alpha' higher than previous training cycles")
            self.min_alpha_yet_reached = next_alpha
            job_no = 0

            for sent_idx, sentence in enumerate(sentences):
                sentence_length = self._raw_word_count([sentence])

                # can we fit this sentence into the existing job batch?
                if batch_size + sentence_length <= self.batch_words:
                    # yes => add it to the current job
                    job_batch.append(sentence)
                    batch_size += sentence_length
                else:
                    # no => submit the existing job
                    logger.debug(
                        "queueing job #%i (%i words, %i sentences) at alpha %.05f",
                        job_no, batch_size, len(job_batch), next_alpha)
                    job_no += 1
                    job_queue.put((job_batch, next_alpha))

                    # update the learning rate for the next job
                    if self.min_alpha < next_alpha:
                        if total_examples:
                            # examples-based decay
                            pushed_examples += len(job_batch)
                            progress = 1.0 * pushed_examples / total_examples
                        else:
                            # words-based decay
                            pushed_words += self._raw_word_count(job_batch)
                            progress = 1.0 * pushed_words / total_words
                        next_alpha = self.alpha - (self.alpha - self.min_alpha) * progress
                        next_alpha = max(self.min_alpha, next_alpha)

                    # add the sentence that didn't fit as the first item of a new job
                    job_batch, batch_size = [sentence], sentence_length

            # add the last job too (may be significantly smaller than batch_words)
            if job_batch:
                logger.debug(
                    "queueing job #%i (%i words, %i sentences) at alpha %.05f",
                    job_no, batch_size, len(job_batch), next_alpha)
                job_no += 1
                job_queue.put((job_batch, next_alpha))

            if job_no == 0 and self.train_count ==