va-project/backend/api/companies.py

import os
from typing import Any

import numpy as np
import pandas as pd

from scraper.top100_extractor import programming_crime_list

COMPANIES_CSV_PATH: str = 'scraper/companies.csv'
COMPANY_DATA_CSV_PATH: str = 'Elaborated_Data/normalized_data.csv'


def non_nan(values: list[Any]) -> list[Any]:
    """Drop NaN entries from a list, keeping string values as-is."""
    return [v for v in values if isinstance(v, str) or not np.isnan(v)]

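# For illustration only (hypothetical values, not taken from the CSV): a row
# whose 'tag 2' and 'tag 3' cells are empty reaches non_nan as
# ['fintech', nan, nan] and comes back as ['fintech'].
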
def get_companies(root_dir: str) -> list[dict]:
    """
    Read companies.csv and return its contents as a JSON-serialisable list of
    records for the frontend.
    """
    df = pd.read_csv(os.path.join(root_dir, COMPANIES_CSV_PATH), index_col='ticker')
    # Keep only the tickers tracked by the scraper's top-100 list.
    tickers = pd.Series(programming_crime_list)
    df = df.loc[df.index.isin(tickers), :]

    # Collapse the three tag columns into a single list-valued 'tags' column,
    # dropping the NaN placeholders left for companies with fewer than three tags.
    df['tags'] = df[['tag 1', 'tag 2', 'tag 3']].values.tolist()
    df['tags'] = df['tags'].apply(non_nan)
    df = df.drop(columns=['tag 1', 'tag 2', 'tag 3'])

    # Include company metrics from the elaborated data set.
    df_data = pd.read_csv(os.path.join(root_dir, COMPANY_DATA_CSV_PATH), index_col='Ticker') \
        .loc[:, ['Valuation', 'Financial Health', 'Estimated Growth', 'Past Performance']]
    # Compute limits of metrics
    # print(df_data.agg([min, max]).to_dict('records'))
    df = df.join(df_data)

    # np.nan does not serialise to JSON, so replace it with None (-> null) first.
    return df.reset_index().replace({np.nan: None}).to_dict('records')
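

# Minimal manual check, illustrative only (assumption: run from the repository
# root so the relative CSV paths above resolve); not part of the original file.
if __name__ == '__main__':
    companies = get_companies('.')
    print(f'{len(companies)} companies loaded')
    if companies:
        print(companies[0])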