class TensorflowXlnetClassification extends Serializable with TensorflowForClassification
- Alphabetic
- By Inheritance
- TensorflowXlnetClassification
- TensorflowForClassification
- Serializable
- Serializable
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Instance Constructors
-
new
TensorflowXlnetClassification(tensorflowWrapper: TensorflowWrapper, spp: SentencePieceWrapper, configProtoBytes: Option[Array[Byte]] = None, tags: Map[String, Int], signatures: Option[Map[String, String]] = None)
- tensorflowWrapper
XLNet Model wrapper with TensorFlow Wrapper
- spp
XLNet SentencePiece model with SentencePieceWrapper
- configProtoBytes
Configuration for TensorFlow session
- tags
labels which model was trained with in order
- signatures
TF v2 signatures in Spark NLP
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- val _tfXlnetSignatures: Map[String, String]
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
calculateSigmoid(scores: Array[Float]): Array[Float]
Calculate sigmoid from returned logits
Calculate sigmoid from returned logits
- scores
logits output from output layer
- Definition Classes
- TensorflowForClassification
-
def
calculateSoftmax(scores: Array[Float]): Array[Float]
Calculate softmax from returned logits
Calculate softmax from returned logits
- scores
logits output from output layer
- Definition Classes
- TensorflowForClassification
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
def
constructAnnotationForSequenceClassifier(sentence: Sentence, label: String, meta: Array[(String, String)]): Annotation
- Definition Classes
- TensorflowForClassification
-
def
constructMetaForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): Array[(String, String)]
- Definition Classes
- TensorflowForClassification
-
def
encode(sentences: Seq[(WordpieceTokenizedSentence, Int)], maxSequenceLength: Int): Seq[Array[Int]]
Encode the input sequence to indexes IDs adding padding where necessary
Encode the input sequence to indexes IDs adding padding where necessary
- Definition Classes
- TensorflowForClassification
-
def
encodeSequence(seq1: Seq[WordpieceTokenizedSentence], seq2: Seq[WordpieceTokenizedSentence], maxSequenceLength: Int): Seq[Array[Int]]
- Definition Classes
- TensorflowForClassification
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
findIndexedToken(tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tokenPiece: TokenPiece): Option[IndexedToken]
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
def
predict(tokenizedSentences: Seq[TokenizedSentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, tags: Map[String, Int]): Seq[Annotation]
- Definition Classes
- TensorflowForClassification
-
def
predictSequence(tokenizedSentences: Seq[TokenizedSentence], sentences: Seq[Sentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, coalesceSentences: Boolean = false, tags: Map[String, Int], activation: String = ActivationFunction.softmax): Seq[Annotation]
- Definition Classes
- TensorflowForClassification
-
def
predictSpan(documents: Seq[Annotation], maxSentenceLength: Int, caseSensitive: Boolean, mergeTokenStrategy: String = MergeTokenStrategy.vocab): Seq[Annotation]
- Definition Classes
- TensorflowForClassification
-
def
scoresToLabelForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): String
- Definition Classes
- TensorflowForClassification
-
val
sentenceEndTokenId: Int
- Attributes
- protected
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
val
sentencePadTokenId: Int
- Attributes
- protected
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
val
sentenceStartTokenId: Int
- Attributes
- protected
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
- val spp: SentencePieceWrapper
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
tag(batch: Seq[Array[Int]]): Seq[Array[Array[Float]]]
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
def
tagSequence(batch: Seq[Array[Int]], activation: String): Array[Array[Float]]
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
def
tagSpan(batch: Seq[Array[Int]]): (Array[Array[Float]], Array[Array[Float]])
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
- val tensorflowWrapper: TensorflowWrapper
-
def
toString(): String
- Definition Classes
- AnyRef → Any
-
def
tokenizeDocument(docs: Seq[Annotation], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
def
tokenizeWithAlignment(sentences: Seq[TokenizedSentence], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]
- Definition Classes
- TensorflowXlnetClassification → TensorflowForClassification
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
def
wordAndSpanLevelAlignmentWithTokenizer(tokenLogits: Array[Array[Float]], tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tags: Map[String, Int]): Seq[Annotation]
Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization
Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization
### Input orig_tokens = ["John", "Johanson", "'s", "house"] labels = ["NNP", "NNP", "POS", "NN"]
# bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"] # orig_to_tok_map == [1, 2, 4, 6]
- Definition Classes
- TensorflowForClassification