Keep both id and project name when parsing JSON

Both the project 'id' and 'name' are needed:
the former to build the contributors API URL,
the latter for display purposes.
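
As an illustrative sketch (not part of this change), assuming the
GerritProject and ProjectContributionSource shapes introduced below,
plus a hypothetical 'toSource' helper and URL builder standing in for
GerritEndpointConfig.contributorsUrl:

    case class GerritProject(id: String, name: String)
    case class ProjectContributionSource(name: String, contributorsUrl: String)

    // 'id' is the URL-encoded project key and feeds the contributors REST
    // endpoint; 'name' is what the analytics output reports for display.
    def toSource(project: GerritProject,
                 contributorsUrl: String => String): ProjectContributionSource =
      ProjectContributionSource(project.name, contributorsUrl(project.id))

    // hypothetical usage; the URL shape is only an example
    toSource(GerritProject("apps%2Freviewit", "apps/reviewit"),
             id => s"http://gerrit.example.com/projects/$id/contributors")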

Change-Id: Iabc9286ebe37276402dedb128d743eff0511c644
diff --git a/src/main/scala/com/gerritforge/analytics/engine/GerritAnalyticsTransformations.scala b/src/main/scala/com/gerritforge/analytics/engine/GerritAnalyticsTransformations.scala
index 3c37d89..195cc02 100644
--- a/src/main/scala/com/gerritforge/analytics/engine/GerritAnalyticsTransformations.scala
+++ b/src/main/scala/com/gerritforge/analytics/engine/GerritAnalyticsTransformations.scala
@@ -20,21 +20,20 @@
 import java.time.format.DateTimeFormatter
 import java.time.{LocalDateTime, ZoneId, ZoneOffset, ZonedDateTime}
 
-import com.gerritforge.analytics.model.{Email, GerritEndpointConfig, ProjectContributionSource}
+import com.gerritforge.analytics.model._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.functions.{udf, _}
 import org.apache.spark.sql.{DataFrame, SparkSession}
 
 import scala.collection.JavaConverters._
-import scala.util.{Failure, Success, Try}
 
 object GerritAnalyticsTransformations {
 
-  implicit class PimpedRDDString(val rdd: RDD[String]) extends AnyVal {
+  implicit class PimpedRDDGerritProject(val rdd: RDD[GerritProject]) extends AnyVal {
 
     def enrichWithSource(implicit config: GerritEndpointConfig): RDD[ProjectContributionSource] = {
-      rdd.map { projectName =>
-        ProjectContributionSource(projectName, config.contributorsUrl(projectName))
+      rdd.map { project =>
+        ProjectContributionSource(project.name, config.contributorsUrl(project.id))
       }
     }
   }
diff --git a/src/main/scala/com/gerritforge/analytics/job/Main.scala b/src/main/scala/com/gerritforge/analytics/job/Main.scala
index 8c7953f..87468e5 100644
--- a/src/main/scala/com/gerritforge/analytics/job/Main.scala
+++ b/src/main/scala/com/gerritforge/analytics/job/Main.scala
@@ -15,7 +15,7 @@
 package com.gerritforge.analytics.job
 
 import com.gerritforge.analytics.engine.GerritAnalyticsTransformations._
-import com.gerritforge.analytics.model.{GerritEndpointConfig, GerritProjects}
+import com.gerritforge.analytics.model.{GerritEndpointConfig, GerritProjectsRDD}
 import org.apache.spark.sql.{DataFrame, SparkSession}
 
 import scala.io.{Codec, Source}
@@ -70,8 +70,8 @@
 
   def run()(implicit config: GerritEndpointConfig, spark: SparkSession): DataFrame = {
     import spark.sqlContext.implicits._ // toDF
-    val sc = spark.sparkContext
-    val projects = sc.parallelize(GerritProjects(Source.fromURL(config.gerritProjectsUrl)))
+    implicit val sc = spark.sparkContext
+    val projects = GerritProjectsRDD(Source.fromURL(config.gerritProjectsUrl))
     val aliasesDF = getAliasDF(config.emailAlias)
 
     projects
@@ -84,6 +84,7 @@
       .convertDates("last_commit_date")
 
   }
+
   def saveES(df: DataFrame)(implicit config: GerritEndpointConfig) {
     import org.elasticsearch.spark.sql._
     config.elasticIndex.map(df.saveToEs(_))
diff --git a/src/main/scala/com/gerritforge/analytics/model/GerritProjects.scala b/src/main/scala/com/gerritforge/analytics/model/GerritProject.scala
similarity index 63%
rename from src/main/scala/com/gerritforge/analytics/model/GerritProjects.scala
rename to src/main/scala/com/gerritforge/analytics/model/GerritProject.scala
index bbfee35..ad8ca50 100644
--- a/src/main/scala/com/gerritforge/analytics/model/GerritProjects.scala
+++ b/src/main/scala/com/gerritforge/analytics/model/GerritProject.scala
@@ -14,23 +14,29 @@
 
 package com.gerritforge.analytics.model
 
-import scala.io.Source
+import org.apache.spark.SparkContext
+import org.apache.spark.rdd.RDD
 import org.json4s.native.JsonMethods.parse
 
-object GerritProjects {
+import scala.io.Source
 
-  type GerritProjects = Seq[String]
+case class GerritProject(id: String, name: String)
+
+object GerritProjectsRDD {
 
   val GERRIT_PREFIX = ")]}'\n"
   private val GERRIT_PREFIX_LEN = GERRIT_PREFIX.length
 
-  def apply(jsonSource: Source) =
-    parse(jsonSource.drop(GERRIT_PREFIX_LEN).mkString)
-      .values
-      .asInstanceOf[Map[String,Map[String,String]]]
-      .values
-      .map(_("id"))
-      .toSeq
+  def apply(jsonSource: Source)(implicit sc: SparkContext): RDD[GerritProject] =
+    sc.parallelize(
+      parse(jsonSource.drop(GERRIT_PREFIX_LEN).mkString)
+        .values
+        .asInstanceOf[Map[String, Map[String, String]]]
+        .mapValues(_ ("id"))
+        .toSeq)
+      .map {
+        case (name, id) => GerritProject(id, name)
+      }
 }
 
 case class ProjectContributionSource(name: String, contributorsUrl: String)
\ No newline at end of file
diff --git a/src/test/scala/com/gerritforge/analytics/GerritAnalyticsTransformationsSpec.scala b/src/test/scala/com/gerritforge/analytics/GerritAnalyticsTransformationsSpec.scala
index aa4cc3a..0e7702f 100644
--- a/src/test/scala/com/gerritforge/analytics/GerritAnalyticsTransformationsSpec.scala
+++ b/src/test/scala/com/gerritforge/analytics/GerritAnalyticsTransformationsSpec.scala
@@ -17,7 +17,7 @@
 import java.io.{File, FileWriter}
 
 import com.gerritforge.analytics.engine.GerritAnalyticsTransformations._
-import com.gerritforge.analytics.model.{GerritEndpointConfig, GerritProjects, ProjectContributionSource}
+import com.gerritforge.analytics.model.{GerritEndpointConfig, GerritProject, GerritProjectsRDD, ProjectContributionSource}
 import org.apache.spark.sql.Row
 import org.json4s.JsonDSL._
 import org.json4s._
@@ -29,28 +29,29 @@
 class GerritAnalyticsTransformationsSpec extends FlatSpec with Matchers
   with SparkTestSupport with Inside {
 
-  "GerritProjects" should "parse project names" in {
+  "GerritProjects" should "parse JSON into a GerritProject objects" in {
 
-    val projectNames = GerritProjects(Source.fromString(
+    val projects = GerritProjectsRDD(Source.fromString(
       """)]}'
         |{
-        | "All-Projects": {
-        |   "id": "All-Projects",
+        | "All-Projects-name": {
+        |   "id": "All-Projects-id",
         | },
-        | "Test": {
-        |   "id": "Test",
+        | "Test-name": {
+        |   "id": "Test-id",
         | }
         |}
-        |""".stripMargin))
+        |""".stripMargin)).collect()
 
-    projectNames should have size 2
-    projectNames should contain allOf("All-Projects", "Test")
+    projects should have size 2
+    projects should contain allOf(GerritProject("All-Projects-id", "All-Projects-name"),
+      GerritProject("Test-id", "Test-name"))
   }
 
 
   "enrichWithSource" should "enrich project RDD object with its source" in {
 
-    val projectRdd = sc.parallelize(Seq("project"))
+    val projectRdd = sc.parallelize(Seq(GerritProject("project-id", "project-name")))
     implicit val config = GerritEndpointConfig("http://somewhere.com")
 
     val projectWithSource = projectRdd
@@ -59,8 +60,8 @@
 
     projectWithSource should have size 1
     inside(projectWithSource.head) {
-      case ProjectContributionSource(project, url) => {
-        project should be("project")
+      case ProjectContributionSource(projectName, url) => {
+        projectName should be("project-name")
         url should startWith("http://somewhere.com")
       }
     }
diff --git a/src/test/scala/com/gerritforge/analytics/model/GerritProjectsTest.scala b/src/test/scala/com/gerritforge/analytics/model/GerritProjectsTest.scala
deleted file mode 100644
index ab71352..0000000
--- a/src/test/scala/com/gerritforge/analytics/model/GerritProjectsTest.scala
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.gerritforge.analytics.model
-
-import org.scalatest.{FlatSpec, Matchers}
-
-import scala.io.Source
-
-class GerritProjectsTest extends FlatSpec with Matchers {
-
-  "GerritProjects" should "use the 'id' as identifier" in {
-    val projectId = "apps%2Freviewit" // URL_Encode(project_key) => project_id (i.e.: app/reviewit => apps%2Freviewit)
-    val source = Source.fromString(GerritProjects.GERRIT_PREFIX + s"""{"app/reviewit": {"id":"$projectId"}}""")
-    GerritProjects(source) shouldBe Seq(projectId)
-  }
-}
\ No newline at end of file