Commit 484f2e4a authored by Erly Villaroel

Methods for the database

parent bee8601f
from app import MainApplication
import warnings
from sqlalchemy.orm import sessionmaker, scoped_session
from pyspark.sql import SparkSession
from decimal import Decimal
from pyspark.sql.types import StructType, StructField, StringType, DecimalType, ArrayType
warnings.filterwarnings("ignore")
from sqlalchemy import create_engine
from app.main.engine.util.Utils import Utils
base = MainApplication()
app = base.create_app()
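# Application bootstrap; Utils further down is instantiated with this app instance.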
#
# if __name__ == "__main__":
#     base.run(port=8000)
spark = SparkSession.builder \
    .appName("Crear DataFrame en PySpark") \
    .getOrCreate()
# Specify the DataFrame schema
schema = StructType([
    StructField("PIVOT_Fecha", StringType(), True),
    StructField("COUNTERPART_Fecha", StringType(), True),
    StructField("Cuenta", StringType(), True),
    StructField("Account", StringType(), True),
    StructField("DIFF", DecimalType(10, 2), True),
    StructField("LISTA_DIFF", ArrayType(StringType()), True),
    StructField("INTER_PIVOT_ID", StringType(), True),
    StructField("INTER_CTP_ID", StringType(), True),
    StructField("PIVOT_Valor", DecimalType(10, 2), True),
    StructField("COUNTERPART_Valor", DecimalType(10, 2), True)
])
# Create the DataFrame with sample data
data = [
    ("2024-04-01", "2024-04-01", "Cuenta1", "Account1", Decimal('10.50'), ['1', '2', '3'], "ID1", "ID2", Decimal('100.00'), Decimal('95.00')),
    ("2024-04-02", "2024-04-02", "Cuenta2", "Account2", Decimal('15.75'), ['4', '5', '6'], "ID3", "ID4", Decimal('200.00'), Decimal('190.00')),
    ("2024-04-03", "2024-04-03", "Cuenta3", "Account3", Decimal('20.25'), ['7', '8', '9'], "ID5", "ID6", Decimal('300.00'), Decimal('280.00'))
]
df = spark.createDataFrame(data, schema)
# Show the DataFrame
df.show()
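# Process descriptor for the "match-and-exclude" script: execution limits
# (config-params) plus the pivot/counterpart table settings (params-input)
# that define how records are grouped and compared.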
descriptor = {
    "idProcess": 500240,
    "idScript": "match-and-exclude",
    "config-params": {
        "max-records-per-combination": 10,
        "max-timeout-per-combination": 1000,
        "exclude-entity-pivot": True
    },
    "params-input": {
        "pivot-config": {
            "tablename": "PIVOT_TEMPORAL",
            "id-column": "ID",
            "amount-columns": "Valor",
            "columns-group": ["Fecha", "Cuenta"],
            "columns-transaction": ["Fecha", "Cuenta", "Valor"]
        },
        "counterpart-config": {
            "tablename": "PIVOT_TEMPORAL",
            "id-column": "ID",
            "amount-columns": "Valor",
            "columns-group": ["Fecha", "Account"],
            "columns-transaction": ["Fecha", "Account", "Valor"]
        }
    }
}
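# Build the result from the sample DataFrame and the descriptor; the returned
# dict exposes a "detail" entry that is persisted below.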
a = Utils(app).create_result(df, descriptor)
print(a)
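# Open a SQLAlchemy session against the MySQL instance and save the result detail.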
engine = create_engine("mysql+pymysql://root:root@192.168.0.11:3301/cusca")
session_factory = sessionmaker(bind=engine)
session = session_factory()
b = Utils(app).save_result(a["detail"], descriptor, session)