object
BackPressureHandling
Type Members
-
case class
Ack(offset: Int) extends akka.io.Tcp.Event with Product with Serializable
-
class
OutQueue extends AnyRef
Value Members
-
final
def
!=(arg0: AnyRef): Boolean
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
final
def
==(arg0: AnyRef): Boolean
-
final
def
==(arg0: Any): Boolean
-
object
CanCloseNow extends akka.io.Tcp.Event
-
val
ProbeForEndOfWriting: Write
-
val
ProbeForWriteQueueEmpty: Write
-
object
ResumeReadingNow extends akka.io.Tcp.Event
-
def
apply(ackRate: Int, lowWatermark: Int = Int.MaxValue): PipelineStage
-
final
def
asInstanceOf[T0]: T0
-
def
clone(): AnyRef
-
final
def
eq(arg0: AnyRef): Boolean
-
def
equals(arg0: Any): Boolean
-
def
finalize(): Unit
-
final
def
getClass(): Class[_]
-
def
hashCode(): Int
-
final
def
isInstanceOf[T0]: Boolean
-
final
def
ne(arg0: AnyRef): Boolean
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
def
toString(): String
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
Inherited from AnyRef
Inherited from Any
Automated back-pressure handling is based on the idea that pressure is created by the consumer but experienced at the producer side. For HTTP, for example, this means that an excessive number of incoming requests is the ultimate cause of an experienced bottleneck on the response-sending side.
The principle of applying back-pressure means that the best way of handling pressure is to handle it at its root cause, which means throttling the rate at which work requests come in. That is the underlying assumption here: work is generated on the incoming network side. If that assumption does not hold — e.g. when the network stream is truly bi-directional (as with websockets) — the strategy presented here won't be optimal.
How it works:
No pressure:
Pressure:
Possible improvement: (see http://doc.akka.io/docs/akka/2.2.0-RC1/scala/io-tcp.html)