import json
import os
from pathlib import Path

import pandas as pd

from utils.documents_to_database import documents_to_database
from utils.convert_encoding_meta import convert_encoding_meta
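# Note: documents_to_database and convert_encoding_meta are project-local helpers
# (imported from utils). Their exact behaviour is assumed here: the former is expected
# to index the final DataFrame into the project's database, the latter to clean up
# the encoding of Facebook's raw JSON export.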
## %% Determine the script's directory path
try:
    # This works when running as a script
    script_dir = Path(__file__).parent.parent
except NameError:
    # This works in interactive environments
    script_dir = Path().absolute()

project_root = script_dir
fb_data_path = [os.path.join(project_root, 'import_data', 'data', 'FacebookBusiness', 'posts', 'profile_posts_1.json')]
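# fb_data_path is a single-element list; only fb_data_path[0] is read below.
# Presumably additional profile_posts_*.json exports could be appended here.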
## %% Read and parse the Facebook posts JSON file
try:
    with open(fb_data_path[0], "r", encoding="raw-unicode-escape") as posts:
        posts_json = json.loads(convert_encoding_meta(posts.read()))
except Exception as e:
    print(f"Error reading JSON file: {e}")
    exit(1)
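# Opening with encoding="raw-unicode-escape" and passing the text through
# convert_encoding_meta is assumed to repair the escaped UTF-8 sequences
# (accented characters, emoji) that Facebook's JSON exports typically contain.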
## %% Extract the relevant data from each post
posts_medias = []
for post in posts_json:
    # Extract the post text
    data_post_items = post['data']
    texte_post_list = []
    for item in data_post_items:
        if item.get('post'):
            texte_post_list.append(item['post'])
    texte = "\n".join(texte_post_list)

    # Process the post's attachments
    for attachment in post['attachments']:
        if attachment.get('data'):
            for data_item in attachment['data']:
                if data_item.get('media'):
                    media = data_item['media']
                    if len(texte) > 1:
                        posts_medias.append({"network": "FacebookBusiness",
                                             "type": "posts",
                                             "index": "rs_facebookbusiness_posts",
                                             "chemin": fb_data_path[0],
                                             "texte": texte,
                                             "creation_timestamp": media["creation_timestamp"]})
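# Each media attachment yields one row; posts whose joined text is empty or a
# single character (len(texte) <= 1) are skipped entirely.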
## %% Create a DataFrame from the extracted data
posts_medias_df = pd.DataFrame(posts_medias)
## %% Replace NaN values with empty strings
posts_medias_df.fillna(value="", inplace=True)
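# Every row is built with the same keys, so this is mainly a safeguard before indexing.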
## %% Drop duplicates based on text and creation timestamp
posts_medias_df.drop_duplicates(subset=['texte', 'creation_timestamp'], inplace=True)
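# Duplicate rows typically come from a post with several media items sharing the
# same creation_timestamp; only one row per (texte, creation_timestamp) pair is kept.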
## %% Send the data to the database
documents_to_database(posts_medias_df)