Commit fdd2a0a8 by Mohammed Shibili

completed

# Default ignored files
/shelf/
/workspace.xml
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (pythonProject)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/pythonProject.iml" filepath="$PROJECT_DIR$/.idea/pythonProject.iml" />
    </modules>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
from script.servieces.panda_operations import read_pandas

if __name__ == "__main__":
    read_pandas()
[logg]
data_path=script/utils/MOCK_DATA.csv
new_data_path=script/utils/appended_data.csv
chunk_size=100
import configparser

# Load the paths and the chunk size from the [logg] section of conf/application.conf.
config = configparser.ConfigParser()
config.read("conf/application.conf")

old_csv = config.get('logg', 'data_path')        # source CSV
new_csv = config.get('logg', 'new_data_path')    # destination CSV for the appended data
chunk_size = int(config.get('logg', 'chunk_size'))
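An optional variant of the same configuration read (a sketch, not part of the commit): configparser can coerce and default values itself, so a missing key does not raise. The fallback values below are assumptions, not values from the repository.

import configparser

config = configparser.ConfigParser()
config.read("conf/application.conf")

# getint() parses the integer for us; fallback (an assumed default of 100)
# is used if chunk_size is missing from the [logg] section.
chunk_size = config.getint('logg', 'chunk_size', fallback=100)
old_csv = config.get('logg', 'data_path', fallback='script/utils/MOCK_DATA.csv')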
import pandas as pd

from script.configuration.appconfig import old_csv, chunk_size, new_csv


def adding():
    # Read the source CSV in chunks of `chunk_size` rows.
    chunk_iter = pd.read_csv(old_csv, chunksize=chunk_size)
    processed_chunks = []
    for chunk in chunk_iter:
        # Build full_name from the first_name and last_name columns.
        chunk['full_name'] = chunk['first_name'] + ' ' + chunk['last_name']
        processed_chunks.append(chunk)
    # Concatenate the processed chunks and write them to the new CSV.
    new_dataframe = pd.concat(processed_chunks)
    new_dataframe.to_csv(new_csv, index=False)
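A quick sanity check on the output (a sketch only; it assumes adding() has already run and that the appended CSV configured above exists):

import pandas as pd

from script.configuration.appconfig import new_csv

# Read the appended file back and confirm the derived column is present.
check = pd.read_csv(new_csv)
assert 'full_name' in check.columns
print(check[['first_name', 'last_name', 'full_name']].head())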
import pandas as pd

from script.configuration.appconfig import new_csv

# The appended CSV is produced by adding(); it is loaded once when this module is imported.
new_dataset = pd.read_csv(new_csv)


def locate(search_id, row_start, row_end, column_start, column_end):
    # Label-based lookup with loc(): filter rows whose 'id' column matches search_id.
    result = new_dataset.loc[new_dataset['id'] == search_id]
    print(f'output of loc() is \n{result}')
    # Position-based lookup with iloc(): slice the requested row and column range.
    result_of_iloc = new_dataset.iloc[row_start:row_end, column_start:column_end]
    print(f'output of iloc() is \n{result_of_iloc}')
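A non-interactive usage sketch for locate(); the id value and the row/column bounds are arbitrary example numbers, not values taken from MOCK_DATA.csv:

from script.core.handlers.locating import locate

# loc() filters by the 'id' column value; iloc() slices rows 0-4 and columns 0-2 by position.
locate(search_id=5, row_start=0, row_end=5, column_start=0, column_end=3)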
from script.core.handlers.full_name import adding
from script.core.handlers.locating import locate


def read_pandas():
    # Build the appended CSV with the full_name column first.
    adding()
    try:
        search_id = int(input("enter the id to search: "))
        row_start = int(input("enter starting number of row: "))
        row_end = int(input("enter ending number of row: "))
        column_start = int(input("enter starting number of column: "))
        column_end = int(input("enter ending number of column: "))
    except ValueError as e:
        # Bail out instead of calling locate() with undefined values.
        print(e, "input should be an integer")
        return
    locate(search_id, row_start, row_end, column_start, column_end)
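A possible refinement, not part of the commit: re-prompt until the user types an integer instead of aborting on the first bad value. The ask_int helper and read_pandas_retrying are made-up names for this sketch.

from script.core.handlers.full_name import adding
from script.core.handlers.locating import locate


def ask_int(prompt):
    # Keep asking until the input parses as an integer.
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("input should be an integer, try again")


def read_pandas_retrying():
    adding()
    locate(
        ask_int("enter the id to search: "),
        ask_int("enter starting number of row: "),
        ask_int("enter ending number of row: "),
        ask_int("enter starting number of column: "),
        ask_int("enter ending number of column: "),
    )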