diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 1b87e4e98bd83dd2ad447ccae7b71f813f19de12..b9515a12bc57351af0f59c9f31a606e4956d6148 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -324,6 +324,7 @@ object Hive {
         |import org.apache.spark.sql.functions._
         |import org.apache.spark.sql.hive._
         |import org.apache.spark.sql.hive.test.TestHive._
+        |import org.apache.spark.sql.hive.test.TestHive.implicits._
         |import org.apache.spark.sql.types._""".stripMargin,
     cleanupCommands in console := "sparkContext.stop()",
     // Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
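
Reviewer note: the import added above brings TestHive's implicit conversions (e.g.
toDF on local collections) into scope for the sbt hive/console session. An
illustrative session, not part of the patch:

    scala> Seq((1, "a"), (2, "b")).toDF("id", "name").show()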
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
index 4190b7ffe1c8f6539c7e4dcb237d6d3ac25cb629..0d460b634d9b0f52551bf093979bc5806e2b6cd6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
@@ -55,6 +55,9 @@ object Row {
     // TODO: Improve the performance of this if used in performance critical part.
     new GenericRow(rows.flatMap(_.toSeq).toArray)
   }
+
+  /** Returns an empty row. */
+  val empty = apply()
 }
 
 
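Reviewer note: Row.empty is a shared zero-arity row, equivalent to calling Row() but
without a fresh allocation per call. A quick sketch of its contract (illustrative,
not part of the patch):

    import org.apache.spark.sql.Row

    val r: Row = Row.empty
    assert(r.length == 0)     // a row with no fields
    assert(r.toSeq.isEmpty)   // the same, viewed as a Seq
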
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
index 0ded1cce68391f1217ca0bfd88cf880ac5cdba45..a59d42cdd60282a2d3f61d2f2ba43f925548bd4c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
@@ -314,7 +314,7 @@ private[sql] case class InMemoryColumnarTableScan(
                 columnAccessors(i).extractTo(nextRow, i)
                 i += 1
               }
-              nextRow
+              if (attributes.isEmpty) Row.empty else nextRow
             }
 
             override def hasNext: Boolean = columnAccessors(0).hasNext
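
Reviewer note: as I read the surrounding code, even an empty projection still decodes
one column so that hasNext (which consults columnAccessors(0) above) can count rows,
leaving the shared nextRow buffer with arity 1 instead of 0. Returning the zero-arity
Row.empty gives downstream operators the column shape they asked for. A simplified
sketch of the emission step (names assumed, illustrative only):

    import org.apache.spark.sql.Row

    // attributes: the requested output columns; nextRow: the mutable buffer sized
    // for the accessors actually decoded, which is never empty in this scan.
    def emit(attributes: Seq[Any], nextRow: Row): Row =
      if (attributes.isEmpty) Row.empty  // zero fields, per the fix above
      else nextRow                       // reuse the mutable buffer as before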
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index c5c4f448a722487c8dd346f2e857196e551d7a34..7c47fe454b6dc3d58d0193638dcbeac1aee6ff86 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -39,6 +39,19 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
   import org.apache.spark.sql.test.TestSQLContext.implicits._
   val sqlCtx = TestSQLContext
 
+  test("SPARK-6743: no columns from cache") {
+    Seq(
+      (83, 0, 38),
+      (26, 0, 79),
+      (43, 81, 24)
+    ).toDF("a", "b", "c").registerTempTable("cachedData")
+
+    cacheTable("cachedData")
+    checkAnswer(
+      sql("SELECT t1.b FROM cachedData, cachedData t1 GROUP BY t1.b"),
+      Row(0) :: Row(81) :: Nil)
+  }
+
   test("self join with aliases") {
     Seq(1,2,3).map(i => (i, i.toString)).toDF("int", "str").registerTempTable("df")
 
@@ -142,7 +155,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
       sql("SELECT ABS(2.5)"),
       Row(2.5))
   }
-  
+
   test("aggregation with codegen") {
     val originalValue = conf.codegenEnabled
     setConf(SQLConf.CODEGEN_ENABLED, "true")
@@ -194,7 +207,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
       "SELECT value, sum(key) FROM testData3x GROUP BY value",
       (1 to 100).map(i => Row(i.toString, 3 * i)))
     testCodeGen(
-      "SELECT sum(key), SUM(CAST(key as Double)) FROM testData3x",      
+      "SELECT sum(key), SUM(CAST(key as Double)) FROM testData3x",
       Row(5050 * 3, 5050 * 3.0) :: Nil)
     // AVERAGE
     testCodeGen(
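
Reviewer note: the regression test mirrors the reported query. In the cartesian
product, the scan of the bare cachedData reference needs no columns, which is exactly
the path fixed in InMemoryColumnarTableScan. A spark-shell style reproduction
(Spark 1.3-era API; session illustrative, not part of the patch):

    import sqlContext.implicits._

    Seq((83, 0, 38), (26, 0, 79), (43, 81, 24))
      .toDF("a", "b", "c")
      .registerTempTable("cachedData")
    sqlContext.cacheTable("cachedData")

    // After the fix this returns exactly Row(0) and Row(81).
    sqlContext.sql("SELECT t1.b FROM cachedData, cachedData t1 GROUP BY t1.b").show()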