I have a DataFrame with a column whose datatype is string, but whose contents are the string representation of an array.
from pyspark.sql import Row, SparkSession

# Build (or reuse) a SparkSession so `spark` is defined when this snippet
# runs as a standalone script (in pyspark shells/notebooks it already exists).
spark = SparkSession.builder.getOrCreate()

# Reproduce the question's premise faithfully: geography is a *string*
# column that merely looks like an array. (Passing real Python lists to
# Row, as the original snippet did, would create an array<string> column
# and contradict the printSchema output shown below.)
item = spark.createDataFrame([
    Row(item='fish', geography="['london','a','b','hyd']"),
    Row(item='chicken', geography="['a','hyd','c']"),
    Row(item='rice', geography="['a','b','c','blr']"),
    Row(item='soup', geography="['a','kol','simla']"),
    Row(item='pav', geography="['a','del']"),
    Row(item='kachori', geography="['a','guj']"),
    Row(item='fries', geography="['a','chen']"),
    Row(item='noodles', geography="['a','mum']"),
])
item.show(truncate=False)
# +-------+------------------------+
# |item   |geography               |
# +-------+------------------------+
# |fish   |['london','a','b','hyd']|
# |chicken|['a','hyd','c']         |
# |rice   |['a','b','c','blr']     |
# |soup   |['a','kol','simla']     |
# |pav    |['a','del']             |
# |kachori|['a','guj']             |
# |fries  |['a','chen']            |
# |noodles|['a','mum']             |
# +-------+------------------------+

# printSchema() prints to stdout and returns None, so don't wrap it in
# print() — the original `print(item.printSchema())` emitted a stray "None".
item.printSchema()
# root
# |-- item: string (nullable = true)
# |-- geography: string (nullable = true)
How to convert the geography column in the above dataset to array type?
F.expr("regexp_extract_all(geography, '(\\\\w+)', 1)")
`regexp_extract_all` is available from Spark 3.1+.

Signature: `regexp_extract_all(str, regexp[, idx])` — extracts all substrings of `str` that match the `regexp` expression, returning the text captured by regex group index `idx`.
from pyspark.sql import Row, functions as F

# Minimal reproduction: geography is a plain string that only *looks*
# like an array literal.
sample_rows = [
    Row(item='fish', geography="['london','a','b','hyd']"),
    Row(item='noodles', geography="['a','mum']"),
]
item = spark.createDataFrame(sample_rows)
item.printSchema()
# root
# |-- item: string (nullable = true)
# |-- geography: string (nullable = true)

# Spark 3.1+ SQL function: pull out every \w+ token (regex group 1),
# turning the string column into an array<string> column.
extract_tokens = "regexp_extract_all(geography, '(\\\\w+)', 1)"
item = item.withColumn('geography', F.expr(extract_tokens))
item.printSchema()
# root
# |-- item: string (nullable = true)
# |-- geography: array (nullable = true)
# |    |-- element: string (containsNull = true)
Collected from the Internet
Please contact [email protected] to delete if infringement.
Comments