Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add some benchmarks #221

Open
wants to merge 7 commits into
base: 2.2.x
Choose a base branch
from
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Add some benchmarks
Atry committed Jun 1, 2018

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature.
commit 2a5efc29d7d863469c37a4ab605c06b5e01cb057
28 changes: 28 additions & 0 deletions benchmark/build.sbt
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
libraryDependencies ++= {
  import Ordering.Implicits._
  // ND4J/ND4S 0.8.0-era artifacts are only pulled in on old Scala versions —
  // presumably because nd4s 0.8.0 is not published for 2.12+; confirm before bumping.
  val scalaVersionNumbers = VersionNumber(scalaVersion.value).numbers
  if (scalaVersionNumbers < Seq(2, 12)) {
    Seq(
      "com.thoughtworks.deeplearning.etl" %% "cifar100" % "0.2.0",
      "ch.qos.logback" % "logback-classic" % "1.2.3" % Optional,
      "org.nd4j" %% "nd4s" % "0.8.0",
      "org.nd4j" % "nd4j-api" % "0.8.0",
      "org.nd4j" % "nd4j-native-platform" % "0.8.0" % Optional
    )
  } else {
    Nil
  }
}

// Version of the com.thoughtworks.dsl artifacts used below. The two compiler
// plugins and the runtime domains-scalaz library must stay on the same release,
// so the version is defined once instead of being repeated per setting.
val dslVersion = "1.0.0-RC10"

// JMH benchmarks must run in a forked JVM so the native ND4J backend and JMH's
// process isolation behave correctly.
fork in Test := true

enablePlugins(JmhPlugin)

// This subproject exists only to run benchmarks; never publish it.
publishArtifact := false

addCompilerPlugin("com.thoughtworks.dsl" %% "compilerplugins-bangnotation" % dslVersion)

addCompilerPlugin("com.thoughtworks.dsl" %% "compilerplugins-reseteverywhere" % dslVersion)

libraryDependencies += "com.thoughtworks.dsl" %% "domains-scalaz" % dslVersion

// Enables the `import $exec.`...`` syntax used by the benchmark sources.
addCompilerPlugin("com.thoughtworks.import" %% "import" % "2.0.2")
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
package com.thoughtworks.deeplearning.benchmark

import java.util.concurrent.{ExecutorService, Executors}

import com.thoughtworks.deeplearning.DeepLearning
import com.thoughtworks.deeplearning.etl.Cifar100
import com.thoughtworks.deeplearning.etl.Cifar100.Batch
import com.thoughtworks.deeplearning.plugins.Builtins
import com.thoughtworks.feature.Factory
import org.openjdk.jmh.annotations._
import com.thoughtworks.future._
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}

/**
* @author 杨博 (Yang Bo)
*/
object benchmark {

  // Pulls FixedLearningRate from a pinned gist revision via the
  // com.thoughtworks.import compiler plugin's `$exec` mechanism; it supplies the
  // FixedLearningRate plugin trait mixed into the hyperparameter stack below.
  import $exec.`https://gist.github.com/Atry/1fb0608c655e3233e68b27ba99515f16/raw/39ba06ee597839d618f2fcfe9526744c60f2f70a/FixedLearningRate.sc`

  /** A layer's output paired with the type-class evidence needed to train on it.
    *
    * `Output` is kept abstract so heterogeneous layer outputs can be handled
    * uniformly; `typeClassInstance` provides the `DeepLearning` instance for the
    * concrete `Output` type.
    */
  trait LayerOutput {
    def numberOfFeatures: Int
    type Output
    def output: Output
    def typeClassInstance: DeepLearning.Aux[Output, INDArray, INDArray]
  }
  object LayerOutput {
    // Wraps a raw INDArray as a LayerOutput.
    // WARNING: `typeClassInstance` is an unimplemented stub (`???`) — calling it
    // throws NotImplementedError. This factory appears unused by the benchmark.
    def input(indArray: INDArray): LayerOutput = new LayerOutput {
      // Column count of the 2-D input matrix (dimension 1 of the shape).
      def numberOfFeatures: Int = indArray.shape().apply(1)

      type Output = INDArray
      def output = indArray

      def typeClassInstance: DeepLearning.Aux[INDArray, INDArray, INDArray] = ???
    }
  }

  /** JMH benchmark state: a four-layer branching model trained on CIFAR-100.
    *
    * All `@Param` fields are injected by JMH before `setup()` runs.
    */
  @Threads(value = 1)
  @State(Scope.Benchmark)
  class FourLayer {

    // Number of samples per training batch.
    @Param(Array("4"))
    protected var batchSize: Int = _

    // Size of the thread pool backing the implicit ExecutionContext.
    @Param(Array("1", "2", "4"))
    protected var sizeOfThreadPool: Int = _

    // Width of each hidden dense layer.
    @Param(Array("16", "32", "64"))
    protected var numberOfHiddenFeatures: Int = _

    // Number of parallel branches summed in CoarseFeatures.
    @Param(Array("16", "8"))
    protected var numberOfBranches: Int = _

    // Created in setup(), shut down in tearDown(); implicit so the training
    // futures below pick it up.
    private implicit var executionContext: ExecutionContextExecutorService = _

    // Endless stream of (coarseLabel, Batch) pairs. `lazy` so the (blocking)
    // CIFAR-100 download/load happens on first use, not at construction.
    private lazy val batches = {
      val cifar100: Cifar100 = Cifar100.load().blockingAwait
      Iterator.continually(cifar100.epochByCoarseClass(batchSize)).flatten
    }

    /** The model under benchmark: shared coarse features feeding a coarse-class
      * softmax and one fine-class softmax head per coarse class.
      */
    class Model {
      // Full plugin stack with a fixed learning rate of 1e-4.
      val hyperparameters = Factory[Builtins with FixedLearningRate].newInstance(learningRate = 0.0001)

      import hyperparameters._, implicits._

      // Sum of `numberOfBranches` independent two-dense-layer branches
      // (pixels -> hidden -> hidden, ReLU activations via `max(_, 0.0)`).
      object CoarseFeatures extends (INDArray => INDArrayLayer) {

        val branches = Seq.fill(numberOfBranches)(new (INDArray => INDArrayLayer) {
          object Dense1 extends (INDArray => INDArrayLayer) {
            val weight = INDArrayWeight(Nd4j.randn(Cifar100.NumberOfPixelsPerSample, numberOfHiddenFeatures))
            val bias = INDArrayWeight(Nd4j.randn(1, numberOfHiddenFeatures))

            def apply(input: INDArray) = {
              max(input dot weight + bias, 0.0)
            }
          }

          val weight = INDArrayWeight(Nd4j.randn(numberOfHiddenFeatures, numberOfHiddenFeatures))
          val bias = INDArrayWeight(Nd4j.randn(1, numberOfHiddenFeatures))

          def apply(input: INDArray) = {
            max(Dense1(input) dot weight + bias, 0.0)
          }
        })

        def apply(input: INDArray) = {
          // Element-wise sum of all branch outputs.
          branches.map(_.apply(input)).reduce(_ + _)
        }
      }

      // Softmax over the 20 coarse classes, computed from the shared features.
      object CoarseProbabilityModel {
        val weight = INDArrayWeight(Nd4j.randn(numberOfHiddenFeatures, Cifar100.NumberOfCoarseClasses))
        val bias = INDArrayWeight(Nd4j.randn(1, Cifar100.NumberOfCoarseClasses))

        def apply(input: INDArrayLayer) = {
          val scores = input dot weight + bias

          // Row-wise softmax: exp normalized by the per-row sum.
          val expScores = exp(scores)
          expScores / expScores.sum(1)
        }
      }

      // One fine-class softmax head per coarse class, each with two hidden
      // dense layers (Dense1 inside Dense2) before the output projection.
      val fineProbabilityModel = Seq.fill(Cifar100.NumberOfCoarseClasses)(new (INDArrayLayer => INDArrayLayer) {
        object Dense2 extends (INDArrayLayer => INDArrayLayer) {

          object Dense1 extends (INDArrayLayer => INDArrayLayer) {
            val weight = INDArrayWeight(Nd4j.randn(numberOfHiddenFeatures, numberOfHiddenFeatures))
            val bias = INDArrayWeight(Nd4j.randn(1, numberOfHiddenFeatures))

            def apply(coarseFeatures: INDArrayLayer) = {
              max(coarseFeatures dot weight + bias, 0.0)
            }
          }

          val weight = INDArrayWeight(Nd4j.randn(numberOfHiddenFeatures, numberOfHiddenFeatures))
          val bias = INDArrayWeight(Nd4j.randn(1, numberOfHiddenFeatures))

          def apply(coarseFeatures: INDArrayLayer) = {
            max(Dense1(coarseFeatures) dot weight + bias, 0.0)
          }
        }

        val weight = INDArrayWeight(Nd4j.randn(numberOfHiddenFeatures, Cifar100.NumberOfFineClassesPerCoarseClass))
        val bias = INDArrayWeight(Nd4j.randn(1, Cifar100.NumberOfFineClassesPerCoarseClass))

        def apply(coarseFeatures: INDArrayLayer) = {
          val scores = Dense2(coarseFeatures) dot weight + bias

          val expScores = exp(scores)
          expScores / expScores.sum(1)
        }
      })

      /** Combined training loss: cross-entropy of the coarse-class prediction
        * plus cross-entropy of the fine-class prediction within `coarseLabel`.
        */
      def loss(coarseLabel: Int, batch: Batch): DoubleLayer = {
        def crossEntropy(prediction: INDArrayLayer, expectOutput: INDArray): DoubleLayer = {
          -(hyperparameters.log(prediction) * expectOutput).mean
        }

        // NOTE(review): this local `batchSize` intentionally shadows the @Param
        // field — it is the actual batch size read from the tensor's shape.
        val Array(batchSize, width, height, channels) = batch.pixels.shape()
        // Flatten each image to a single row of pixels before the dense layers.
        val coarseFeatures = CoarseFeatures(batch.pixels.reshape(batchSize, width * height * channels))
        val coarseProbabilities = CoarseProbabilityModel(coarseFeatures)
        val fineProbabilities = fineProbabilityModel(coarseLabel)(coarseFeatures)

        crossEntropy(coarseProbabilities, batch.coarseClasses) + crossEntropy(fineProbabilities, batch.localFineClasses)
      }

      // One training step: computes the loss and applies gradient updates.
      def train(coarseLabel: Int, batch: Batch) = {
        loss(coarseLabel, batch).train
      }

    }

    private var model: Model = null

    @Setup
    final def setup(): Unit = {
      executionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(sizeOfThreadPool))
      model = new Model
    }

    @TearDown
    final def tearDown(): Unit = {
      // Drop the model first, then stop the executor it trained on.
      model = null
      executionContext.shutdown()
      executionContext = null
    }

    /** Measured operation: take the next batch and run one synchronous
      * training step, returning the scalar loss.
      */
    @Benchmark
    final def deepLearningDotScala(): Double = {
      // The iterator is shared mutable state; guard it in case JMH runs this
      // method from more than one thread.
      val (coarseClass, batch) = batches.synchronized {
        batches.next()
      }
      model.train(coarseClass, batch).blockingAwait
    }

  }

}
3 changes: 3 additions & 0 deletions build.sbt
Original file line number Diff line number Diff line change
@@ -144,6 +144,9 @@ lazy val `plugins-Builtins` =
`plugins-CumulativeINDArrayLayers`,
DeepLearning % "test->test"
)

// Benchmark subproject; depends on the full Builtins plugin stack it trains with.
lazy val benchmark = project.dependsOn(`plugins-Builtins`)

// NOTE(review): root-level setting — presumably keeps the aggregate root
// project itself from being published; confirm against the release pipeline.
publishArtifact := false

lazy val unidoc =
2 changes: 2 additions & 0 deletions project/plugins.sbt
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
// Shared build conventions (release, scalac flags, etc.) for ThoughtWorks projects.
addSbtPlugin("com.thoughtworks.sbt-best-practice" % "sbt-best-practice" % "2.5.0")

// Generates ScalaTest suites from examples embedded in Scaladoc.
addSbtPlugin("com.thoughtworks.example" % "sbt-example" % "2.0.2")

// Provides the JmhPlugin enabled by the benchmark subproject.
addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.3.4")