diff --git a/core/pom.xml b/core/pom.xml
index a1bdd8ec68aebf4a7c1dae760f72bd5dacf5e4f6..d87e2bca030e312fe7365027d3a03e5f6e0b0253 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -266,6 +266,11 @@
       <artifactId>junit-interface</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.spark-project</groupId>
+      <artifactId>pyrolite</artifactId>
+      <version>2.0</version>
+    </dependency>
   </dependencies>
   <build>
     <outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
index 32f1100406d741a46c27bc165e35cce9a0952491..f9d86fed34d0f558a75dd2686189b276eda2bab5 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
@@ -25,6 +25,8 @@ import java.util.{List => JList, ArrayList => JArrayList, Map => JMap, Collectio
 import scala.collection.JavaConversions._
 import scala.reflect.ClassTag
 
+import net.razorvine.pickle.{Pickler, Unpickler}
+
 import org.apache.spark._
 import org.apache.spark.api.java.{JavaSparkContext, JavaPairRDD, JavaRDD}
 import org.apache.spark.broadcast.Broadcast
@@ -284,6 +286,36 @@ private[spark] object PythonRDD {
     file.close()
   }
 
+  /**
+   * Convert an RDD of serialized Python dictionaries to Scala Maps
+   * TODO: Support more Python types.
+   */
+  def pythonToJavaMap(pyRDD: JavaRDD[Array[Byte]]): JavaRDD[Map[String, _]] = {
+    pyRDD.rdd.mapPartitions { iter =>
+      val unpickle = new Unpickler
+      // TODO: Figure out why flatMap is necessary for pyspark
+      iter.flatMap { row =>
+        unpickle.loads(row) match {
+          case objs: java.util.ArrayList[JMap[String, _] @unchecked] => objs.map(_.toMap)
+          // In case the partition doesn't have a collection
+          case obj: JMap[String @unchecked, _] => Seq(obj.toMap)
+        }
+      }
+    }
+  }
+
+  /**
+   * Convert an RDD of Java objects to an RDD of serialized Python objects that is usable
+   * by PySpark.
+   */
+  def javaToPython(jRDD: JavaRDD[Any]): JavaRDD[Array[Byte]] = {
+    jRDD.rdd.mapPartitions { iter =>
+      val pickle = new Pickler
+      iter.map { row =>
+        pickle.dumps(row)
+      }
+    }
+  }
 }
 
 private
diff --git a/dev/run-tests b/dev/run-tests
index 6ad674a2ba1277824e481af18486fcfc1b0e7553..0725b681f1a1bf60e6490c56e9efd97ce1f7c265 100755
--- a/dev/run-tests
+++ b/dev/run-tests
@@ -34,6 +34,7 @@ else
 fi
 JAVA_VERSION=$($java_cmd -version 2>&1 | sed 's/java version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q')
 [ "$JAVA_VERSION" -ge 18 ] && echo "" || echo "[Warn] Java 8 tests will not run because JDK version is < 1.8."
+export SPARK_HIVE=true
 
 echo "========================================================================="
 echo "Running Apache RAT checks"
diff --git a/docs/README.md b/docs/README.md
index 0678fc5c86706ad374abc9feeabaadd305b8f289..75b1811ba99af77cf25b774c7fa3a22fd97c52c5 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -42,7 +42,7 @@ To mark a block of code in your markdown to be syntax highlighted by jekyll duri
 
 You can build just the Spark scaladoc by running `sbt/sbt doc` from the SPARK_PROJECT_ROOT directory.
 
-Similarly, you can build just the PySpark epydoc by running `epydoc --config epydoc.conf` from the SPARK_PROJECT_ROOT/pyspark directory.
+Similarly, you can build just the PySpark epydoc by running `epydoc --config epydoc.conf` from the SPARK_PROJECT_ROOT/pyspark directory. Documentation is only generated for classes that are listed as public in `__init__.py`.
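+
+Here "public" means exported from `python/pyspark/__init__.py`; a class is typically made public by
+adding it to that module's `__all__` list, for example:
+
+    __all__ = ["SparkConf", "SparkContext", "SQLContext", "RDD", "SchemaRDD", "SparkFiles", "StorageLevel", "Row"]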
 
 When you run `jekyll` in the docs directory, it will also copy over the scaladoc for the various Spark subprojects into the docs directory (and then also into the _site directory). We use a jekyll plugin to run `sbt/sbt doc` before building the site so if you haven't run it (recently) it may take some time as it generates all of the scaladoc.  The jekyll plugin also generates the PySpark docs using [epydoc](http://epydoc.sourceforge.net/).
 
diff --git a/docs/_plugins/copy_api_dirs.rb b/docs/_plugins/copy_api_dirs.rb
index bbd56d2fd13bb651a6a114a0118cc9930abf0a56..05f0bd47a88a58142e62bc94a35d8e08e4e834ab 100644
--- a/docs/_plugins/copy_api_dirs.rb
+++ b/docs/_plugins/copy_api_dirs.rb
@@ -32,8 +32,8 @@ if not (ENV['SKIP_API'] == '1' or ENV['SKIP_SCALADOC'] == '1')
   curr_dir = pwd
   cd("..")
 
-  puts "Running sbt/sbt doc from " + pwd + "; this may take a few minutes..."
-  puts `sbt/sbt doc`
+  puts "Running 'sbt/sbt doc hive/doc' from " + pwd + "; this may take a few minutes..."
+  puts `sbt/sbt doc hive/doc`
 
   puts "Moving back into docs dir."
   cd("docs")
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index a59393e1424debf5bebf88de6ffb2523b4654368..6f616fb7c244891e79ccc169c926eddf121e98fe 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -20,7 +20,7 @@ a schema that describes the data types of each column in the row.  A SchemaRDD i
 in a traditional relational database.  A SchemaRDD can be created from an existing RDD, parquet
 file, or by running HiveQL against data stored in [Apache Hive](http://hive.apache.org/).
 
-**All of the examples on this page use sample data included in the Spark distribution and can be run in the spark-shell.**
+**All of the examples on this page use sample data included in the Spark distribution and can be run in the `spark-shell`.**
 
 </div>
 
@@ -33,6 +33,19 @@ a schema that describes the data types of each column in the row.  A JavaSchemaR
 in a traditional relational database.  A JavaSchemaRDD can be created from an existing RDD, parquet
 file, or by running HiveQL against data stored in [Apache Hive](http://hive.apache.org/).
 </div>
+
+<div data-lang="python"  markdown="1">
+
+Spark SQL allows relational queries expressed in SQL or HiveQL to be executed using
+Spark.  At the core of this component is a new type of RDD,
+[SchemaRDD](api/pyspark/pyspark.sql.SchemaRDD-class.html).  SchemaRDDs are composed of
+[Row](api/pyspark/pyspark.sql.Row-class.html) objects along with
+a schema that describes the data types of each column in the row.  A SchemaRDD is similar to a table
+in a traditional relational database.  A SchemaRDD can be created from an existing RDD, parquet
+file, or by running HiveQL against data stored in [Apache Hive](http://hive.apache.org/).
+
+**All of the examples on this page use sample data included in the Spark distribution and can be run in the `pyspark` shell.**
+</div>
 </div>
 
 ***************************************************************************************************
@@ -44,7 +57,7 @@ file, or by running HiveQL against data stored in [Apache Hive](http://hive.apac
 
 The entry point into all relational functionality in Spark is the
 [SQLContext](api/sql/core/index.html#org.apache.spark.sql.SQLContext) class, or one of its
-decendents.  To create a basic SQLContext, all you need is a SparkContext.
+descendants.  To create a basic SQLContext, all you need is a SparkContext.
 
 {% highlight scala %}
 val sc: SparkContext // An existing SparkContext.
@@ -60,7 +73,7 @@ import sqlContext._
 
 The entry point into all relational functionality in Spark is the
 [JavaSQLContext](api/sql/core/index.html#org.apache.spark.sql.api.java.JavaSQLContext) class, or one
-of its decendents.  To create a basic JavaSQLContext, all you need is a JavaSparkContext.
+of its descendants.  To create a basic JavaSQLContext, all you need is a JavaSparkContext.
 
 {% highlight java %}
 JavaSparkContext ctx = ...; // An existing JavaSparkContext.
@@ -69,6 +82,19 @@ JavaSQLContext sqlCtx = new org.apache.spark.sql.api.java.JavaSQLContext(ctx);
 
 </div>
 
+<div data-lang="python"  markdown="1">
+
+The entry point into all relational functionality in Spark is the
+[SQLContext](api/pyspark/pyspark.sql.SQLContext-class.html) class, or one
+of its descendants.  To create a basic SQLContext, all you need is a SparkContext.
+
+{% highlight python %}
+from pyspark.sql import SQLContext
+sqlCtx = SQLContext(sc)
+{% endhighlight %}
+
+</div>
+
 </div>
 
 ## Running SQL on RDDs
@@ -81,7 +107,7 @@ One type of table that is supported by Spark SQL is an RDD of Scala case classes
 defines the schema of the table.  The names of the arguments to the case class are read using
 reflection and become the names of the columns. Case classes can also be nested or contain complex
 types such as Sequences or Arrays. This RDD can be implicitly converted to a SchemaRDD and then be
-registered as a table.  Tables can used in subsequent SQL statements.
+registered as a table.  Tables can be used in subsequent SQL statements.
 
 {% highlight scala %}
 val sqlContext = new org.apache.spark.sql.SQLContext(sc)
@@ -176,6 +202,34 @@ List<String> teenagerNames = teenagers.map(new Function<Row, String>() {
 
 </div>
 
+<div data-lang="python"  markdown="1">
+
+One type of table that is supported by Spark SQL is an RDD of dictionaries.  The keys of the
+dictionary define the column names of the table, and the types are inferred by looking at the first
+row. Any RDD of dictionaries can be converted to a SchemaRDD and then registered as a table.  Tables
+can be used in subsequent SQL statements.
+
+{% highlight python %}
+# Load a text file and convert each line to a dictionary.
+lines = sc.textFile("examples/src/main/resources/people.txt")
+parts = lines.map(lambda l: l.split(","))
+people = parts.map(lambda p: {"name": p[0], "age": int(p[1])})
+
+# Infer the schema, and register the SchemaRDD as a table.
+# In future versions of PySpark we would like to add support for registering RDDs with other
+# datatypes as tables
+peopleTable = sqlCtx.inferSchema(people)
+peopleTable.registerAsTable("people")
+
+# SQL can be run over SchemaRDDs that have been registered as a table.
+teenagers = sqlCtx.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
+
+# The results of SQL queries are RDDs and support all the normal RDD operations.
+teenNames = teenagers.map(lambda p: "Name: " + p.name)
+{% endhighlight %}
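+
+As with any other RDD, the query results can then be brought back to the driver with a normal
+action. A small sketch, continuing the example above:
+
+{% highlight python %}
+for teenName in teenNames.collect():
+  print teenName
+{% endhighlight %}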
+
+</div>
+
 </div>
 
 **Note that Spark SQL currently uses a very basic SQL parser.**
@@ -231,6 +285,27 @@ parquetFile.registerAsTable("parquetFile");
 JavaSchemaRDD teenagers = sqlCtx.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
 
 
+{% endhighlight %}
+
+</div>
+
+<div data-lang="python"  markdown="1">
+
+{% highlight python %}
+
+peopleTable # The SchemaRDD from the previous example.
+
+# SchemaRDDs can be saved as parquet files, maintaining the schema information.
+peopleTable.saveAsParquetFile("people.parquet")
+
+# Read in the parquet file created above.  Parquet files are self-describing so the schema is preserved.
+# The result of loading a parquet file is also a SchemaRDD.
+parquetFile = sqlCtx.parquetFile("people.parquet")
+
+# Parquet files can also be registered as tables and then used in SQL statements.
+parquetFile.registerAsTable("parquetFile")
+teenagers = sqlCtx.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
+
 {% endhighlight %}
 
 </div>
@@ -318,4 +393,24 @@ Row[] results = hiveCtx.hql("FROM src SELECT key, value").collect();
 
 </div>
 
+<div data-lang="python"  markdown="1">
+
+When working with Hive one must construct a `HiveContext`, which inherits from `SQLContext` and
+adds support for finding tables in the MetaStore and writing queries using HiveQL. In addition to
+the `sql` method, a `HiveContext` also provides an `hql` method, which allows queries to be
+expressed in HiveQL.
+
+{% highlight python %}
+
+from pyspark.sql import HiveContext
+hiveCtx = HiveContext(sc)
+
+hiveCtx.hql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
+hiveCtx.hql("LOAD DATA LOCAL INPATH 'examples/src/main/resources/kv1.txt' INTO TABLE src")
+
+# Queries can be expressed in HiveQL.
+results = hiveCtx.hql("FROM src SELECT key, value").collect()
+
+{% endhighlight %}
+
 </div>
diff --git a/pom.xml b/pom.xml
index 0eacedf7a65334ee9a70f5da2fd93bc08648db7c..cd204376de5db5c8ab5cbea14c2b0e10b411c0b2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -262,7 +262,7 @@
       <dependency>
         <groupId>com.clearspring.analytics</groupId>
         <artifactId>stream</artifactId>
-        <version>2.4.0</version>
+        <version>2.5.1</version>
       </dependency>
       <!-- In theory we need not directly depend on protobuf since Spark does not directly
            use it. However, when building with Hadoop/YARN 2.2 Maven doesn't correctly bump
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index aac07b9f6129d8226ec6acc3443b3fb1bd987f92..09b527c76a5aea59563ac6df48864c30e047588e 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -345,7 +345,8 @@ object SparkBuild extends Build {
         "com.twitter"               %% "chill"            % chillVersion excludeAll(excludeAsm),
         "com.twitter"                % "chill-java"       % chillVersion excludeAll(excludeAsm),
         "org.tachyonproject"         % "tachyon"          % "0.4.1-thrift" excludeAll(excludeHadoop, excludeCurator, excludeEclipseJetty, excludePowermock),
-        "com.clearspring.analytics"  % "stream"           % "2.5.1"
+        "com.clearspring.analytics"  % "stream"           % "2.5.1",
+        "org.spark-project"          % "pyrolite"         % "2.0"
       ),
     libraryDependencies ++= maybeAvro
   )
diff --git a/python/pyspark/__init__.py b/python/pyspark/__init__.py
index a51d5af79b0dafd6e685b21d76efd69405e613aa..73fe7378ffa631b48d9322f2f0156491f584295e 100644
--- a/python/pyspark/__init__.py
+++ b/python/pyspark/__init__.py
@@ -34,6 +34,19 @@ Public classes:
       Access files shipped with jobs.
   - L{StorageLevel<pyspark.storagelevel.StorageLevel>}
       Finer-grained cache persistence levels.
+
+Spark SQL:
+  - L{SQLContext<pyspark.sql.SQLContext>}
+      Main entry point for SQL functionality.
+  - L{SchemaRDD<pyspark.sql.SchemaRDD>}
+      A Resilient Distributed Dataset (RDD) with schema information for the data it contains. In
+      addition to normal RDD operations, SchemaRDDs also support SQL.
+  - L{Row<pyspark.sql.Row>}
+      A Row of data returned by a Spark SQL query.
+
+Hive:
+  - L{HiveContext<pyspark.sql.HiveContext>}
+      Main entry point for accessing data stored in Apache Hive.
 """
 
 
@@ -45,9 +58,12 @@ sys.path.insert(0, os.path.join(os.environ["SPARK_HOME"], "python/lib/py4j-0.8.1
 
 from pyspark.conf import SparkConf
 from pyspark.context import SparkContext
+from pyspark.sql import SQLContext
 from pyspark.rdd import RDD
+from pyspark.sql import SchemaRDD
+from pyspark.sql import Row
 from pyspark.files import SparkFiles
 from pyspark.storagelevel import StorageLevel
 
 
-__all__ = ["SparkConf", "SparkContext", "RDD", "SparkFiles", "StorageLevel"]
+__all__ = ["SparkConf", "SparkContext", "SQLContext", "RDD", "SchemaRDD", "SparkFiles", "StorageLevel", "Row"]
diff --git a/python/pyspark/java_gateway.py b/python/pyspark/java_gateway.py
index 6a16756e0576d62391280e10798ce7098214c2ca..6bb6c877c942d4006ad1aca2ae45c3e88c98b2d5 100644
--- a/python/pyspark/java_gateway.py
+++ b/python/pyspark/java_gateway.py
@@ -64,5 +64,9 @@ def launch_gateway():
     java_import(gateway.jvm, "org.apache.spark.api.java.*")
     java_import(gateway.jvm, "org.apache.spark.api.python.*")
     java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
+    java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
+    java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
+    java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
+    java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
     java_import(gateway.jvm, "scala.Tuple2")
     return gateway
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e6eee3f4bd1c5dfbe7ef10955913fa3966aec9
--- /dev/null
+++ b/python/pyspark/sql.py
@@ -0,0 +1,363 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyspark.rdd import RDD
+
+from py4j.protocol import Py4JError
+
+__all__ = ["SQLContext", "HiveContext", "LocalHiveContext", "TestHiveContext", "SchemaRDD", "Row"]
+
+
+class SQLContext:
+    """
+    Main entry point for Spark SQL functionality. A SQLContext can be used to create L{SchemaRDD}s,
+    register L{SchemaRDD}s as tables, execute SQL over tables, cache tables, and read parquet files.
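+
+    A minimal (non-doctest) sketch of the typical flow::
+
+        srdd = sqlCtx.inferSchema(rdd)             # RDD of dicts -> SchemaRDD
+        sqlCtx.registerRDDAsTable(srdd, "table1")  # make it queryable by name
+        sqlCtx.sql("SELECT * FROM table1")         # run SQL, returning another SchemaRDD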
+    """
+
+    def __init__(self, sparkContext):
+        """
+        Create a new SQLContext.
+
+        @param sparkContext: The SparkContext to wrap.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> sqlCtx.inferSchema(srdd) # doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+            ...
+        ValueError:...
+
+        >>> bad_rdd = sc.parallelize([1,2,3])
+        >>> sqlCtx.inferSchema(bad_rdd) # doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+            ...
+        ValueError:...
+
+        >>> allTypes = sc.parallelize([{"int" : 1, "string" : "string", "double" : 1.0, "long": 1L,
+        ... "boolean" : True}])
+        >>> srdd = sqlCtx.inferSchema(allTypes).map(lambda x: (x.int, x.string, x.double, x.long,
+        ... x.boolean))
+        >>> srdd.collect()[0]
+        (1, u'string', 1.0, 1, True)
+        """
+        self._sc = sparkContext
+        self._jsc = self._sc._jsc
+        self._jvm = self._sc._jvm
+        self._pythonToJavaMap = self._jvm.PythonRDD.pythonToJavaMap
+
+    @property
+    def _ssql_ctx(self):
+        """
+        Accessor for the JVM Spark SQL context.  Subclasses can override this property to provide
+        their own JVM Contexts.
+        """
+        if not hasattr(self, '_scala_SQLContext'):
+            self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc())
+        return self._scala_SQLContext
+
+    def inferSchema(self, rdd):
+        """
+        Infer and apply a schema to an RDD of L{dict}s. We peek at the first row of the RDD to
+        determine the field names and types, and then use that to extract all the dictionaries.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> srdd.collect() == [{"field1" : 1, "field2" : "row1"}, {"field1" : 2, "field2": "row2"},
+        ...                    {"field1" : 3, "field2": "row3"}]
+        True
+        """
+        if (rdd.__class__ is SchemaRDD):
+            raise ValueError("Cannot apply schema to %s" % SchemaRDD.__name__)
+        elif not isinstance(rdd.first(), dict):
+            raise ValueError("Only RDDs with dictionaries can be converted to %s: %s" %
+                             (SchemaRDD.__name__, rdd.first()))
+
+        jrdd = self._pythonToJavaMap(rdd._jrdd)
+        srdd = self._ssql_ctx.inferSchema(jrdd.rdd())
+        return SchemaRDD(srdd, self)
+
+    def registerRDDAsTable(self, rdd, tableName):
+        """
+        Registers the given RDD as a temporary table in the catalog.  Temporary tables exist only
+        during the lifetime of this instance of SQLContext.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
+        """
+        if (rdd.__class__ is SchemaRDD):
+            jschema_rdd = rdd._jschema_rdd
+            self._ssql_ctx.registerRDDAsTable(jschema_rdd, tableName)
+        else:
+            raise ValueError("Can only register SchemaRDD as table")
+
+    def parquetFile(self, path):
+        """
+        Loads a Parquet file, returning the result as a L{SchemaRDD}.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> srdd.saveAsParquetFile("/tmp/tmp.parquet")
+        >>> srdd2 = sqlCtx.parquetFile("/tmp/tmp.parquet")
+        >>> srdd.collect() == srdd2.collect()
+        True
+        """
+        jschema_rdd = self._ssql_ctx.parquetFile(path)
+        return SchemaRDD(jschema_rdd, self)
+
+    def sql(self, sqlQuery):
+        """
+        Executes a SQL query using Spark, returning the result as a L{SchemaRDD}.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
+        >>> srdd2 = sqlCtx.sql("SELECT field1 AS f1, field2 as f2 from table1")
+        >>> srdd2.collect() == [{"f1" : 1, "f2" : "row1"}, {"f1" : 2, "f2": "row2"},
+        ...                     {"f1" : 3, "f2": "row3"}]
+        True
+        """
+        return SchemaRDD(self._ssql_ctx.sql(sqlQuery), self)
+
+    def table(self, tableName):
+        """
+        Returns the specified table as a L{SchemaRDD}.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> sqlCtx.registerRDDAsTable(srdd, "table1")
+        >>> srdd2 = sqlCtx.table("table1")
+        >>> srdd.collect() == srdd2.collect()
+        True
+        """
+        return SchemaRDD(self._ssql_ctx.table(tableName), self)
+
+    def cacheTable(self, tableName):
+        """
+        Caches the specified table in-memory.
+        """
+        self._ssql_ctx.cacheTable(tableName)
+
+    def uncacheTable(self, tableName):
+        """
+        Removes the specified table from the in-memory cache.
+        """
+        self._ssql_ctx.uncacheTable(tableName)
+
+
+class HiveContext(SQLContext):
+    """
+    An instance of the Spark SQL execution engine that integrates with data stored in Hive.
+    Configuration for Hive is read from hive-site.xml on the classpath. It supports running both SQL
+    and HiveQL commands.
+    """
+
+    @property
+    def _ssql_ctx(self):
+        try:
+            if not hasattr(self, '_scala_HiveContext'):
+                self._scala_HiveContext = self._get_hive_ctx()
+            return self._scala_HiveContext
+        except Py4JError as e:
+            raise Exception("You must build Spark with Hive. Export 'SPARK_HIVE=true' and run " \
+                            "sbt/sbt assembly" , e)
+
+    def _get_hive_ctx(self):
+        return self._jvm.HiveContext(self._jsc.sc())
+
+    def hiveql(self, hqlQuery):
+        """
+        Runs a query expressed in HiveQL, returning the result as a L{SchemaRDD}.
+        """
+        return SchemaRDD(self._ssql_ctx.hiveql(hqlQuery), self)
+
+    def hql(self, hqlQuery):
+        """
+        Runs a query expressed in HiveQL, returning the result as a L{SchemaRDD}.
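+
+        A minimal (non-doctest) sketch, assuming an existing Hive table named C{src}::
+
+            results = hiveCtx.hql("FROM src SELECT key, value").collect()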
+        """
+        return self.hiveql(hqlQuery)
+
+
+class LocalHiveContext(HiveContext):
+    """
+    Starts up an instance of Hive where metadata is stored locally. An in-process metadata store is
+    created with data stored in ./metadata.  Warehouse data is stored in ./warehouse.
+
+    >>> import os
+    >>> hiveCtx = LocalHiveContext(sc)
+    >>> try:
+    ...     supress = hiveCtx.hql("DROP TABLE src")
+    ... except Exception:
+    ...     pass
+    >>> kv1 = os.path.join(os.environ["SPARK_HOME"], 'examples/src/main/resources/kv1.txt')
+    >>> supress = hiveCtx.hql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
+    >>> supress = hiveCtx.hql("LOAD DATA LOCAL INPATH '%s' INTO TABLE src" % kv1)
+    >>> results = hiveCtx.hql("FROM src SELECT value").map(lambda r: int(r.value.split('_')[1]))
+    >>> num = results.count()
+    >>> reduce_sum = results.reduce(lambda x, y: x + y)
+    >>> num
+    500
+    >>> reduce_sum
+    130091
+    """
+
+    def _get_hive_ctx(self):
+        return self._jvm.LocalHiveContext(self._jsc.sc())
+
+
+class TestHiveContext(HiveContext):
+
+    def _get_hive_ctx(self):
+        return self._jvm.TestHiveContext(self._jsc.sc())
+
+
+# TODO: Investigate if it is more efficient to use a namedtuple. One problem is that named tuples
+# are custom classes that must be generated per Schema.
+class Row(dict):
+    """
+    An extended L{dict} that takes a L{dict} in its constructor, and exposes those items as fields.
+
+    >>> r = Row({"hello" : "world", "foo" : "bar"})
+    >>> r.hello
+    'world'
+    >>> r.foo
+    'bar'
+    """
+
+    def __init__(self, d):
+        d.update(self.__dict__)
+        self.__dict__ = d
+        dict.__init__(self, d)
+
+
+class SchemaRDD(RDD):
+    """
+    An RDD of L{Row} objects that has an associated schema. The underlying JVM object is a SchemaRDD,
+    not a PythonRDD, so we can utilize the relational query API exposed by Spark SQL.
+
+    For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the L{SchemaRDD} is not operated on
+    directly, as its underlying implementation is an RDD composed of Java objects. Instead it is
+    converted to a PythonRDD in the JVM, on which Python operations can be done.
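+
+    A minimal (non-doctest) sketch mixing the two styles, assuming a SchemaRDD C{srdd} whose rows
+    have a C{field1} column::
+
+        srdd.registerAsTable("test")          # relational: query it with SQL via the SQLContext
+        srdd.map(lambda row: row.field1)      # RDD-style: converted to a PythonRDD under the hood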
+    """
+
+    def __init__(self, jschema_rdd, sql_ctx):
+        self.sql_ctx = sql_ctx
+        self._sc = sql_ctx._sc
+        self._jschema_rdd = jschema_rdd
+
+        self.is_cached = False
+        self.is_checkpointed = False
+        self.ctx = self.sql_ctx._sc
+        self._jrdd_deserializer = self.ctx.serializer
+
+    @property
+    def _jrdd(self):
+        """
+        Lazy evaluation of PythonRDD object. Only done when a user calls methods defined by the
+        L{pyspark.rdd.RDD} super class (map, count, etc.).
+        """
+        if not hasattr(self, '_lazy_jrdd'):
+            self._lazy_jrdd = self._toPython()._jrdd
+        return self._lazy_jrdd
+
+    @property
+    def _id(self):
+        return self._jrdd.id()
+
+    def saveAsParquetFile(self, path):
+        """
+        Saves the contents of this L{SchemaRDD} as a parquet file, preserving the schema.  Files
+        that are written out using this method can be read back in as a SchemaRDD using the
+        L{SQLContext.parquetFile} method.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> srdd.saveAsParquetFile("/tmp/test.parquet")
+        >>> srdd2 = sqlCtx.parquetFile("/tmp/test.parquet")
+        >>> srdd2.collect() == srdd.collect()
+        True
+        """
+        self._jschema_rdd.saveAsParquetFile(path)
+
+    def registerAsTable(self, name):
+        """
+        Registers this RDD as a temporary table using the given name.  The lifetime of this temporary
+        table is tied to the L{SQLContext} that was used to create this SchemaRDD.
+
+        >>> srdd = sqlCtx.inferSchema(rdd)
+        >>> srdd.registerAsTable("test")
+        >>> srdd2 = sqlCtx.sql("select * from test")
+        >>> srdd.collect() == srdd2.collect()
+        True
+        """
+        self._jschema_rdd.registerAsTable(name)
+
+    def _toPython(self):
+        # We have to import the Row class explicitly, so that the reference the Pickler holds is
+        # pyspark.sql.Row instead of __main__.Row
+        from pyspark.sql import Row
+        jrdd = self._jschema_rdd.javaToPython()
+        # TODO: This is inefficient, we should construct the Python Row object
+        # in Java land in the javaToPython function. May require a custom
+        # pickle serializer in Pyrolite
+        return RDD(jrdd, self._sc, self._sc.serializer).map(lambda d: Row(d))
+
+    # We override the default cache/persist/checkpoint behavior as we want to cache the underlying
+    # SchemaRDD object in the JVM, not the PythonRDD checkpointed by the super class
+    def cache(self):
+        self.is_cached = True
+        self._jschema_rdd.cache()
+        return self
+
+    def persist(self, storageLevel):
+        self.is_cached = True
+        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
+        self._jschema_rdd.persist(javaStorageLevel)
+        return self
+
+    def unpersist(self):
+        self.is_cached = False
+        self._jschema_rdd.unpersist()
+        return self
+
+    def checkpoint(self):
+        self.is_checkpointed = True
+        self._jschema_rdd.checkpoint()
+
+    def isCheckpointed(self):
+        return self._jschema_rdd.isCheckpointed()
+
+    def getCheckpointFile(self):
+        checkpointFile = self._jschema_rdd.getCheckpointFile()
+        if checkpointFile.isDefined():
+            return checkpointFile.get()
+        else:
+            return None
+
+def _test():
+    import doctest
+    from pyspark.context import SparkContext
+    globs = globals().copy()
+    # The small batch size here ensures that we see multiple batches,
+    # even in these small test examples:
+    sc = SparkContext('local[4]', 'PythonTest', batchSize=2)
+    globs['sc'] = sc
+    globs['sqlCtx'] = SQLContext(sc)
+    globs['rdd'] = sc.parallelize([{"field1" : 1, "field2" : "row1"},
+        {"field1" : 2, "field2": "row2"}, {"field1" : 3, "field2": "row3"}])
+    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
+    globs['sc'].stop()
+    if failure_count:
+        exit(-1)
+
+
+if __name__ == "__main__":
+    _test()
+
diff --git a/python/run-tests b/python/run-tests
index b2b60f08b48e2dadd2fae55fcbe72405656253d1..dabb714da9f5bdb3eee112a58ddf241f9bec5a7a 100755
--- a/python/run-tests
+++ b/python/run-tests
@@ -28,6 +28,9 @@ FAILED=0
 
 rm -f unit-tests.log
 
+# Remove the metastore and warehouse directories created by the HiveContext tests in SparkSQL
+rm -rf metastore warehouse
+
 function run_test() {
     SPARK_TESTING=0 $FWDIR/bin/pyspark $1 2>&1 | tee -a > unit-tests.log
     FAILED=$((PIPESTATUS[0]||$FAILED))
@@ -46,6 +49,7 @@ function run_test() {
 run_test "pyspark/rdd.py"
 run_test "pyspark/context.py"
 run_test "pyspark/conf.py"
+run_test "pyspark/sql.py"
 run_test "-m doctest pyspark/broadcast.py"
 run_test "-m doctest pyspark/accumulators.py"
 run_test "-m doctest pyspark/serializers.py"
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index d3d4c56bafe411cb0d675c600f265f811b3e8d49..24d60ea074296d7e91a99858bac4c4bad2ee6b66 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -26,6 +26,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.dsl
 import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.types._
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
 import org.apache.spark.sql.catalyst.plans.logical.{Subquery, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
@@ -241,4 +242,30 @@ class SQLContext(@transient val sparkContext: SparkContext)
      */
     def debugExec() = DebugQuery(executedPlan).execute().collect()
   }
+
+  /**
+   * Peek at the first row of the RDD and infer its schema.
+   * TODO: We only support primitive types, add support for nested types.
+   */
+  private[sql] def inferSchema(rdd: RDD[Map[String, _]]): SchemaRDD = {
+    val schema = rdd.first.map { case (fieldName, obj) =>
+      val dataType = obj.getClass match {
+        case c: Class[_] if c == classOf[java.lang.String] => StringType
+        case c: Class[_] if c == classOf[java.lang.Integer] => IntegerType
+        case c: Class[_] if c == classOf[java.lang.Long] => LongType
+        case c: Class[_] if c == classOf[java.lang.Double] => DoubleType
+        case c: Class[_] if c == classOf[java.lang.Boolean] => BooleanType
+        case c => throw new Exception(s"Object of type $c cannot be used")
+      }
+      AttributeReference(fieldName, dataType, true)()
+    }.toSeq
+
+    val rowRdd = rdd.mapPartitions { iter =>
+      iter.map { map =>
+        new GenericRow(map.values.toArray.asInstanceOf[Array[Any]]): Row
+      }
+    }
+    new SchemaRDD(this, SparkLogicalPlan(ExistingRdd(schema, rowRdd)))
+  }
+
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
index 91500416eefaafa73693b9f34b084abcf48710e7..a771147f9067635bec795504c78355bf7a2176ea 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql
 
+import net.razorvine.pickle.Pickler
+
 import org.apache.spark.{Dependency, OneToOneDependency, Partition, TaskContext}
 import org.apache.spark.annotation.{AlphaComponent, Experimental}
 import org.apache.spark.rdd.RDD
@@ -25,6 +27,8 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.plans.{Inner, JoinType}
 import org.apache.spark.sql.catalyst.types.BooleanType
+import org.apache.spark.api.java.JavaRDD
+import java.util.{Map => JMap}
 
 /**
  * :: AlphaComponent ::
@@ -308,4 +312,23 @@ class SchemaRDD(
 
   /** FOR INTERNAL USE ONLY */
   def analyze = sqlContext.analyzer(logicalPlan)
+
+  private[sql] def javaToPython: JavaRDD[Array[Byte]] = {
+    val fieldNames: Seq[String] = this.queryExecution.analyzed.output.map(_.name)
+    this.mapPartitions { iter =>
+      val pickle = new Pickler
+      iter.map { row =>
+        val map: JMap[String, Any] = new java.util.HashMap
+        // TODO: We place the map in an ArrayList so that the object is pickled to a List[Dict].
+        // Ideally we should be able to pickle an object directly into a Python collection so we
+        // don't have to create an ArrayList every time.
+        val arr: java.util.ArrayList[Any] = new java.util.ArrayList
+        row.zip(fieldNames).foreach { case (obj, name) =>
+          map.put(name, obj)
+        }
+        arr.add(map)
+        pickle.dumps(arr)
+      }
+    }
+  }
 }
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
index 465e5f146fe71750fdf8442c48a0f07cc3415427..444bbfb4dd93497a1468abac6c50b31fdbd6171d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
@@ -261,8 +261,9 @@ class TestHiveContext(sc: SparkContext) extends LocalHiveContext(sc) {
         testTables.get(name).map(_.commands).getOrElse(sys.error(s"Unknown test table $name"))
       createCmds.foreach(_())
 
-      if (cacheTables)
+      if (cacheTables) {
         cacheTable(name)
+      }
     }
   }
 
diff --git a/sql/hive/src/test/resources/log4j.properties b/sql/hive/src/test/resources/log4j.properties
index 5e17e3b596ba1b9a3e55cc08a220d7020e95e7cf..c07d8fedf199352c7741dea8fa0f6904b8bec8aa 100644
--- a/sql/hive/src/test/resources/log4j.properties
+++ b/sql/hive/src/test/resources/log4j.properties
@@ -45,3 +45,6 @@ log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=OFF
 log4j.additivity.hive.ql.metadata.Hive=false
 log4j.logger.hive.ql.metadata.Hive=OFF
 
+log4j.additivity.org.apache.hadoop.hive.ql.io.RCFile=false
+log4j.logger.org.apache.hadoop.hive.ql.io.RCFile=ERROR
+
diff --git a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
index 5547e9fe58fc7d2548e894ca62cfd9d0feccc2b3..3fb85e1ff73a8f8180c6ba07d9219f791c21aca2 100644
--- a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
@@ -99,7 +99,9 @@ object GenerateMIMAIgnore {
     // Heuristic to remove JVM classes that do not correspond to user-facing classes in Scala
     name.contains("anon") ||
     name.endsWith("$class") ||
-    name.contains("$sp")
+    name.contains("$sp") ||
+    name.contains("hive") ||
+    name.contains("Hive")
   }
 
   /**