trait TensorflowForClassification extends AnyRef
Linear Supertypes
Known Subclasses
Ordering
- Alphabetic
- By Inheritance
Inherited
- TensorflowForClassification
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- All
Abstract Value Members
- abstract def findIndexedToken(tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tokenPiece: TokenPiece): Option[IndexedToken]
-
abstract
val
sentenceEndTokenId: Int
- Attributes
- protected
-
abstract
val
sentencePadTokenId: Int
- Attributes
- protected
-
abstract
val
sentenceStartTokenId: Int
- Attributes
- protected
- abstract def tag(batch: Seq[Array[Int]]): Seq[Array[Array[Float]]]
- abstract def tagSequence(batch: Seq[Array[Int]], activation: String): Array[Array[Float]]
- abstract def tagSpan(batch: Seq[Array[Int]]): (Array[Array[Float]], Array[Array[Float]])
- abstract def tokenizeDocument(docs: Seq[Annotation], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]
- abstract def tokenizeWithAlignment(sentences: Seq[TokenizedSentence], maxSeqLength: Int, caseSensitive: Boolean): Seq[WordpieceTokenizedSentence]
Concrete Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
calculateSigmoid(scores: Array[Float]): Array[Float]
Calculate sigmoid from returned logits
Calculate sigmoid from returned logits
- scores
logits output from output layer
-
def
calculateSoftmax(scores: Array[Float]): Array[Float]
Calculate softmax from returned logits
Calculate softmax from returned logits
- scores
logits output from output layer
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- def constructAnnotationForSequenceClassifier(sentence: Sentence, label: String, meta: Array[(String, String)]): Annotation
- def constructMetaForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): Array[(String, String)]
-
def
encode(sentences: Seq[(WordpieceTokenizedSentence, Int)], maxSequenceLength: Int): Seq[Array[Int]]
Encode the input sequence to indexes IDs adding padding where necessary
- def encodeSequence(seq1: Seq[WordpieceTokenizedSentence], seq2: Seq[WordpieceTokenizedSentence], maxSequenceLength: Int): Seq[Array[Int]]
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def predict(tokenizedSentences: Seq[TokenizedSentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, tags: Map[String, Int]): Seq[Annotation]
- def predictSequence(tokenizedSentences: Seq[TokenizedSentence], sentences: Seq[Sentence], batchSize: Int, maxSentenceLength: Int, caseSensitive: Boolean, coalesceSentences: Boolean = false, tags: Map[String, Int], activation: String = ActivationFunction.softmax): Seq[Annotation]
- def predictSpan(documents: Seq[Annotation], maxSentenceLength: Int, caseSensitive: Boolean, mergeTokenStrategy: String = MergeTokenStrategy.vocab): Seq[Annotation]
- def scoresToLabelForSequenceClassifier(tags: Map[String, Int], scores: Array[Float]): String
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
toString(): String
- Definition Classes
- AnyRef → Any
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
-
def
wordAndSpanLevelAlignmentWithTokenizer(tokenLogits: Array[Array[Float]], tokenizedSentences: Seq[TokenizedSentence], sentence: (WordpieceTokenizedSentence, Int), tags: Map[String, Int]): Seq[Annotation]
Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization
Word-level and span-level alignment with Tokenizer https://github.com/google-research/bert#tokenization
### Input orig_tokens = ["John", "Johanson", "'s", "house"] labels = ["NNP", "NNP", "POS", "NN"]
# bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"] # orig_to_tok_map == [1, 2, 4, 6]