spark-issues mailing list archives

From 郭同 (JIRA) <j...@apache.org>
Subject [jira] [Updated] (SPARK-15404) pyspark sql bug, here is the testcase
Date Thu, 19 May 2016 11:39:12 GMT

     [ https://issues.apache.org/jira/browse/SPARK-15404?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

郭同 updated SPARK-15404:
-----------------------
    Description: 

import os
import sys

from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType


if __name__ == "__main__":
    sc = SparkContext(appName="PythonSQL")
    sqlContext = SQLContext(sc)
    schema = StructType([StructField("person_name", StringType(), False),
                     StructField("person_age", IntegerType(), False)])

    some_rdd = sc.parallelize([Row(person_name="John", person_age=19),
                               Row(person_name="Smith", person_age=23),
                               Row(person_name="Sarah", person_age=18)])

    some_df = sqlContext.createDataFrame(some_rdd, schema)
    some_df.printSchema()

    some_df.registerAsTable("people")

    teenagers = sqlContext.sql("SELECT * FROM people ")

    for each in teenagers.collect():
        print(each)

    sc.stop()


  was:
```
import os
import sys

from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType


if __name__ == "__main__":
    sc = SparkContext(appName="PythonSQL")
    sqlContext = SQLContext(sc)
    schema = StructType([StructField("person_name", StringType(), False),
                     StructField("person_age", IntegerType(), False)])

    some_rdd = sc.parallelize([Row(person_name="John", person_age=19),
                               Row(person_name="Smith", person_age=23),
                               Row(person_name="Sarah", person_age=18)])

    some_df = sqlContext.createDataFrame(some_rdd, schema)
    some_df.printSchema()

    some_df.registerAsTable("people")

    teenagers = sqlContext.sql("SELECT * FROM people ")

    for each in teenagers.collect():
        print(each)

    sc.stop()
```
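
The issue text does not say which error this snippet produces; one plausible reading (an editor's assumption, not stated in the report) is the known interaction between Row keyword arguments and an explicit schema: PySpark's Row sorts keyword arguments alphabetically, so each row above is positionally (person_age, person_name) while the schema declares (person_name, person_age). A minimal sketch of that ordering behaviour, assuming Spark 1.6:

```
from pyspark.sql import Row

# Row sorts keyword arguments by field name, so the first positional value
# here is person_age, not person_name as the schema in the test case expects.
r = Row(person_name="John", person_age=19)
print(r)     # Row(person_age=19, person_name='John')
print(r[0])  # 19
```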


> pyspark sql bug, here is the testcase
> -------------------------------------
>
>                 Key: SPARK-15404
>                 URL: https://issues.apache.org/jira/browse/SPARK-15404
>             Project: Spark
>          Issue Type: Bug
>    Affects Versions: 1.6.0
>            Reporter: 郭同
>
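
If that field-order mismatch is what the test case trips over, a workaround sketch (an editor's suggestion, not the reporter's code) is to pass createDataFrame plain tuples in the schema's declared order; registerTempTable is also the documented 1.6 name, with registerAsTable being an older, deprecated alias:

```
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import StructField, StructType, StringType, IntegerType

if __name__ == "__main__":
    sc = SparkContext(appName="PythonSQL")
    sqlContext = SQLContext(sc)

    schema = StructType([StructField("person_name", StringType(), False),
                         StructField("person_age", IntegerType(), False)])

    # Plain tuples are consumed positionally, so they always line up with the
    # declared schema order; no alphabetical reordering of fields can occur.
    some_rdd = sc.parallelize([("John", 19), ("Smith", 23), ("Sarah", 18)])

    some_df = sqlContext.createDataFrame(some_rdd, schema)
    some_df.printSchema()

    # Documented 1.6 API; registerAsTable is a deprecated alias.
    some_df.registerTempTable("people")

    for each in sqlContext.sql("SELECT * FROM people").collect():
        print(each)

    sc.stop()
```

Alternatively, dropping the explicit schema and calling sqlContext.createDataFrame(some_rdd) lets Spark take both field names and types from the Row objects themselves, so no declared order can disagree with them.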



