class DistributedLDAModel extends LDAModel
Distributed LDA model. This model stores the inferred topics, the full training dataset, and the topic distributions.
- Annotations
- @Since( "1.3.0" )
- Alphabetic
- By Inheritance
- DistributedLDAModel
- LDAModel
- Saveable
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Value Members
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        !=(arg0: Any): Boolean
      
      
      - Definition Classes
- AnyRef → Any
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        ##(): Int
      
      
      - Definition Classes
- AnyRef → Any
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        ==(arg0: Any): Boolean
      
      
      - Definition Classes
- AnyRef → Any
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        asInstanceOf[T0]: T0
      
      
      - Definition Classes
- Any
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        clone(): AnyRef
      
      
      - Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        describeTopics(maxTermsPerTopic: Int): Array[(Array[Int], Array[Double])]
      
      
      Return the topics described by weighted terms. - maxTermsPerTopic
- Maximum number of terms to collect for each topic. 
- returns
- Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight. 
 - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        describeTopics(): Array[(Array[Int], Array[Double])]
      
      
      Return the topics described by weighted terms. WARNING: If vocabSize and k are large, this can return a large object! - returns
- Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight. 
 - Definition Classes
- LDAModel
- Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        val
      
      
        docConcentration: Vector
      
      
      Concentration parameter (commonly named "alpha") for the prior placed on documents' distributions over topics ("theta"). This is the parameter to a Dirichlet distribution. - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        eq(arg0: AnyRef): Boolean
      
      
      - Definition Classes
- AnyRef
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        equals(arg0: Any): Boolean
      
      
      - Definition Classes
- AnyRef → Any
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        finalize(): Unit
      
      
      - Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
 
- 
      
      
      
        
      
    
      
        
        val
      
      
        gammaShape: Double
      
      
      Shape parameter for random initialization of variational parameter gamma. Used for variational inference for perplexity and other test-time computations. - Attributes
- protected[clustering]
- Definition Classes
- DistributedLDAModel → LDAModel
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        getClass(): Class[_]
      
      
      - Definition Classes
- AnyRef → Any
- Annotations
- @native()
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        hashCode(): Int
      
      
      - Definition Classes
- AnyRef → Any
- Annotations
- @native()
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        isInstanceOf[T0]: Boolean
      
      
      - Definition Classes
- Any
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        javaTopTopicsPerDocument(k: Int): JavaRDD[(Long, Array[Int], Array[Double])]
      
      
      Java-friendly version of topTopicsPerDocument. - Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        
        lazy val
      
      
        javaTopicAssignments: JavaRDD[(Long, Array[Int], Array[Int])]
      
      
      Java-friendly version of topicAssignments. - Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        javaTopicDistributions: JavaPairRDD[Long, Vector]
      
      
      Java-friendly version of topicDistributions. - Annotations
- @Since( "1.4.1" )
 
- 
      
      
      
        
      
    
      
        
        val
      
      
        k: Int
      
      
      Number of topics. - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        lazy val
      
      
        logLikelihood: Double
      
      
      Log likelihood of the observed tokens in the training set, given the current parameter estimates: log P(docs | topics, topic distributions for docs, alpha, eta) - Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        lazy val
      
      
        logPrior: Double
      
      
      Log probability of the current parameter estimate: log P(topics, topic distributions for docs | alpha, eta) - Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        ne(arg0: AnyRef): Boolean
      
      
      - Definition Classes
- AnyRef
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        notify(): Unit
      
      
      - Definition Classes
- AnyRef
- Annotations
- @native()
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        notifyAll(): Unit
      
      
      - Definition Classes
- AnyRef
- Annotations
- @native()
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        save(sc: SparkContext, path: String): Unit
      
      
      Save this model to the given path. This saves: - human-readable (JSON) model metadata to path/metadata/
- Parquet formatted data to path/data/
 The model may be loaded using Loader.load.- sc
- Spark context used to save model data. 
- path
- Path specifying the directory in which to save this model. If the directory already exists, this method throws an exception. 
 - Definition Classes
- DistributedLDAModel → Saveable
- Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        synchronized[T0](arg0: ⇒ T0): T0
      
      
      - Definition Classes
- AnyRef
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        toLocal: LocalLDAModel
      
      
      Convert model to a local model. The local model stores the inferred topics but not the topic distributions for training documents. - Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        toString(): String
      
      
      - Definition Classes
- AnyRef → Any
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        topDocumentsPerTopic(maxDocumentsPerTopic: Int): Array[(Array[Long], Array[Double])]
      
      
      Return the top documents for each topic. - maxDocumentsPerTopic
- Maximum number of documents to collect for each topic. 
- returns
- Array over topics. Each element is represented as a pair of matching arrays: (IDs for the documents, weights of the topic in these documents). For each topic, documents are sorted in order of decreasing topic weights. 
 - Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        topTopicsPerDocument(k: Int): RDD[(Long, Array[Int], Array[Double])]
      
      
      For each document, return the top k weighted topics for that document and their weights. - returns
- RDD of (doc ID, topic indices, topic weights) 
 - Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        
        lazy val
      
      
        topicAssignments: RDD[(Long, Array[Int], Array[Int])]
      
      
      Return the top topic for each (doc, term) pair. I.e., for each document, what is the most likely topic generating each term? - returns
- RDD of (doc ID, assignment of top topic index for each term), where the assignment is specified via a pair of zippable arrays (term indices, topic indices). Note that terms will be omitted if not present in the document. 
 - Annotations
- @Since( "1.5.0" )
 
- 
      
      
      
        
      
    
      
        
        val
      
      
        topicConcentration: Double
      
      
      Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics' distributions over terms. This is the parameter to a symmetric Dirichlet distribution. - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.5.0" )
- Note
- The topics' distributions over terms are called "beta" in the original LDA paper by Blei et al., but are called "phi" in many later papers such as Asuncion et al., 2009. 
 
- 
      
      
      
        
      
    
      
        
        def
      
      
        topicDistributions: RDD[(Long, Vector)]
      
      
      For each document in the training set, return the distribution over topics for that document ("theta_doc"). - returns
- RDD of (document ID, topic distribution) pairs 
 - Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        lazy val
      
      
        topicsMatrix: Matrix
      
      
      Inferred topics, where each topic is represented by a distribution over terms. This is a matrix of size vocabSize x k, where each column is a topic. No guarantees are given about the ordering of the topics. WARNING: This matrix is collected from an RDD. Beware memory usage when vocabSize, k are large. - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        
        val
      
      
        vocabSize: Int
      
      
      Vocabulary size (number of terms or words in the vocabulary). - Definition Classes
- DistributedLDAModel → LDAModel
- Annotations
- @Since( "1.3.0" )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        wait(): Unit
      
      
      - Definition Classes
- AnyRef
- Annotations
- @throws( ... )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        wait(arg0: Long, arg1: Int): Unit
      
      
      - Definition Classes
- AnyRef
- Annotations
- @throws( ... )
 
- 
      
      
      
        
      
    
      
        final 
        def
      
      
        wait(arg0: Long): Unit
      
      
      - Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
 
 Databricks Scala Spark API
   Databricks Scala Spark API