Appendix A — Measuring accessibility

This notebook contains the code used to develop accessibility models for Tyne and Wear. The same backbone applies to both job and green space accessibility.

A.1 0. Variables definition and data import

# definitions
import sys

# r5py reads JVM options from sys.argv; each option must be its own string
# (the previous `sys.argv.append([...])` pushed a single nested *list*, which
# an argument parser cannot interpret), and the option should be in place
# before r5py is imported, since the JVM is configured at import time.
sys.argv.extend(["--max-memory", "8G"])

import numpy as np
import pandas as pd
import geopandas as gpd
import datetime as dt
import tracc
from r5py import TransportNetwork, TravelTimeMatrixComputer, TransitMode, LegMode
from datetime import datetime, date, timedelta
import matplotlib.pyplot as plt
from itertools import product  # needed for generating all combinations of O-D pairs


# --- input file locations -------------------------------------------------
# NOTE(review): absolute local path — consider making this configurable
data_folder = "/Users/azanchetta/OneDrive - The Alan Turing Institute/demoland_data"

# Regional-level files (require previous editing):
# population origins
oas_centroids_file = f"{data_folder}/processed/OA_centroids_TyneWear.gpkg"
# OA polygons, needed for visualisation purposes
oas_file = f"{data_folder}/processed/authorities/OA_TyneWear.gpkg"
# local-authority boundaries, needed in order to filter greenspace data within the regional boundaries
region_lads_file = f"{data_folder}/processed/authorities/LADs_tynewear.shp"
# workplace-zone centroids, needed for destinations centroids coordinates
workingplacezones_centroids_file = f"{data_folder}/processed/authorities/WPZ_centroids_tynewear.gpkg"
# greenspace_sites_file = f"{data_folder}/processed/accessibility/greenspace-sites_tynewear.gpkg" # needed for calculating opportunities at greenspaces (area)
# greenspace_entrances_file = f"{data_folder}/processed/accessibility/accessTOgs_tynewear.gpkg" # needed for destinations centroids coordinates
greenspace_file = f"{data_folder}/processed/accessibility/greenspace_tynewear_edited.gpkg"
jobs_file = f"{data_folder}/processed/accessibility/wpz_tynewear_occupation_edited.csv"

# National-level files:
# greenspace_file = f"{data_folder}/raw/accessibility/OS Open Greenspace (GPKG) GB/data/opgrsp_gb.gpkg"
osm_data_file = f"{data_folder}/raw/accessibility/tyne-and-wear-latest.osm.pbf"
gtfs_data_file = f"{data_folder}/raw/accessibility/itm_north_east_gtfs.zip"
# data import

# origins (i.e. census output areas, OAs)
oas_centroids = gpd.read_file(oas_centroids_file, layer="OA_centroids_TyneWear")
# duplicate the OA code into an 'id' column: r5py requires origins to carry 'id'
oas_centroids["id"] = oas_centroids[
    "OA11CD"
]  # Origin dataset must contain an 'id' column for r5py
oas_centroids.head()  # preview

# destination data
# green space sites' entrances (access-point layer of the edited greenspace file)
gs_entrances = gpd.read_file(greenspace_file, layer="access_points")

gs_entrances.head()  # Destination dataset already contains an 'id' column
# workplace zone (WPZ) centroids — destinations for job accessibility
wpz_centroids = gpd.read_file(
    workingplacezones_centroids_file, layer="WPZ_centroids_tynewear"
)
wpz_centroids.head()
# duplicate the WPZ code into an 'id' column: r5py requires destinations to carry 'id'
wpz_centroids["id"] = wpz_centroids[
    "wz11cd"
]  # Destination dataset must contain an 'id' column for r5py

# greenspace site polygons (used later to compute each site's area)
gs_sites = gpd.read_file(greenspace_file, layer="sites")

# network data
# loaded in the subsequent operation (TransportNetwork, below)

# opportunities / land use data
jobs_per_wpz_df = pd.read_csv(
    jobs_file
)  # workplace zones, population (as a proxy for number of jobs)
# note: the opportunities column is called "pop"
gs_entrances.explore()  # interactive sanity-check map of the entrance points
Make this Notebook Trusted to load map: File -> Trust Notebook

A.1.1 CRS conversion

# Converting the original files' CRS to WGS84 (EPSG:4326), which is compatible
# with the GTFS and OSM data
oas_centroids_wgs84 = oas_centroids.to_crs("epsg:4326")
gs_entrances = gs_entrances.to_crs("epsg:4326")
# gs_sites = gs_sites.to_crs("epsg:4326") # left in EPSG:27700: a projected CRS is needed to compute areas in metres
wpz_centroids = wpz_centroids.to_crs("epsg:4326")

A.1.2 Origins and destinations

oas_centroids.head()  # preview origins ('id' duplicated from OA11CD)
OBJECTID OA11CD GlobalID geometry id
0 126926 E00041377 c03c9813-26f3-41f9-85e5-d4cdf3742ca0 POINT (425583.000 562952.000) E00041377
1 126927 E00041435 16e6607e-0b59-4f6f-8ec6-06a7396a70a5 POINT (427216.699 555732.531) E00041435
2 126928 E00041745 4b5fa995-b251-4ee7-9a97-aef0a2598fe3 POINT (427897.004 559557.605) E00041745
3 126929 E00041432 6e660884-3917-4e46-a693-bad0821318cb POINT (427856.367 555759.595) E00041432
4 126930 E00041742 0bfb7f06-a910-4fa2-8db1-e79d319ba232 POINT (427932.556 559770.754) E00041742
wpz_centroids.head()  # preview job destinations ('id' duplicated from wz11cd)
OBJECTID wz11cd GlobalID geometry id
0 2 E33000251 {AF2BD35C-B624-4E2D-9C78-F26DF4FCABCE} POINT (-1.41992 54.91839) E33000251
1 3 E33000799 {8CB93749-3349-462C-93C7-B6E321CC765C} POINT (-1.61606 54.97382) E33000799
2 4 E33000257 {03204BF6-50A6-4AD1-855F-C7BBE6D8137B} POINT (-1.53272 54.90010) E33000257
3 5 E33000079 {53333BDF-9792-4370-94AB-BE7853FA2ACA} POINT (-1.62268 55.01104) E33000079
4 8 E33000174 {35114C58-FAA7-4E83-9724-ACED166052D5} POINT (-1.50942 55.02269) E33000174
gs_entrances.head()  # preview greenspace entrances ('id' column already present)
id accessType refToGreenspaceSite geometry
0 idD93E3AB6-BDCE-483D-B3CF-4242FA90A0B7 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55733 55.03322)
1 id951F323D-8E88-4A5B-B9A4-37E0D69DD870 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.56184 55.03333)
2 id0E14522B-427F-47C1-B043-BC3847ABE673 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.56197 55.03340)
3 id0FECA8F4-6053-4147-A11D-62B01EC6C135 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55989 55.03344)
4 id1BED7A99-E143-48C3-90CE-B7227E820454 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55988 55.03359)
# origins:
#   OAs
# destinations:
#   gs: entrances + OAs centroids
#   jobs: wpz centroids + OAs centroids
# total destination: OAs centroids + wpz centroids + gs entrances

origins = oas_centroids_wgs84

# All destination layers share the 'id' and 'geometry' fields, so they can
# simply be stacked on top of each other; the entrances additionally carry
# 'refToGreenspaceSite', kept to link entrances to their site later on.
destination_layers = [
    oas_centroids_wgs84[["id", "geometry"]],
    wpz_centroids[["id", "geometry"]],
    gs_entrances[["id", "geometry", "refToGreenspaceSite"]],
]
destinations = pd.concat(destination_layers).reset_index(drop=True)

A.1.3 Opportunities

# jobs: n of employees per WPZ
# greenspace: area of site


# add column with opportunity ... one for all?

A.2 1. Travel time matrix computation

A.2.1 Generate the transport network

Compute the network starting from OSM and GTFS data

# load in transport network
# builds the multimodal routing network from the OSM extract plus the GTFS
# feed (r5py constructs an R5 TransportNetwork; this step can take a while)
transport_network = TransportNetwork(osm_data_file, [gtfs_data_file])

A.2.2 Create an empty matrix that contains all origins and destinations to be used later on

This table will be filled in once we compute the travel time matrix (ttm).

# # # only for testing purposes:
# k = 1000
# # selecting first n rows of dataframe for origins and destinations
# # origins = oas_centroids.loc[:k, :]
# # destinations = wpz_centroids.loc[:n, :]
# # selecting random rows, so to make sure we have both wpz AND gs_entrances in the selection of destinations
# origins = origins.sample(n=k)
# destinations = destinations.sample(n=k)
# scaffold dataframe holding every (from_id, to_id) pair; the per-mode travel
# time columns are merged into it later on
empty_ttm = pd.DataFrame(
    product(origins["id"].unique(), destinations["id"].unique()),
    columns=["from_id", "to_id"],
)
empty_ttm.head()
from_id to_id
0 E00041377 E00041377
1 E00041377 E00041435
2 E00041377 E00041745
3 E00041377 E00041432
4 E00041377 E00041742

A.2.3 Travel time matrix

The following piece of code is split into two parts: the first defines the variables that will be passed as parameters to the travel time matrix computation; the second is the loop that generates a travel time matrix for each of several transport modes.

# defining variables
# NOTE(review): r5py warned at runtime that this departure time falls outside
# the period covered by the loaded GTFS feed — confirm and pick a covered date
date_time = "2023,01,19,9,30"  # CHOOSE BEST DATE/TIME
# max_time = dt.timedelta(seconds=900) # SET TO 15 MIN
walking_speed = 4.8  # km/h (r5py's speed unit)
cycling_speed = 16  # km/h
# look-up table matching each mode label with the TransitMode/LegMode objects
# to be passed to the ttm computer ("" = no transit leg for that mode):
modes_lut = pd.DataFrame(
    [
        ["transit", TransitMode.TRANSIT, LegMode.WALK],
        ["car", "", LegMode.CAR],
        ["bicycle", "", LegMode.BICYCLE],
        ["walk", "", LegMode.WALK],
    ],
    columns=("Mode", "Transit_mode", "Leg_mode"),
)


# function to generate the custom transit+street mode list for the
# transport_modes parameter of TravelTimeMatrixComputer
def list_making(s, z):
    """Combine a transit mode and a leg mode into a transport_modes list.

    Empty-string placeholders (used in modes_lut for street-only modes) are
    dropped, so e.g. ("", LegMode.CAR) yields [LegMode.CAR] rather than
    passing a meaningless '' entry through to r5py.
    """
    return [mode for mode in (s, z) if mode != ""]


# accumulator for the combined matrix: one travel-time column per mode.
# .copy() so the scaffold empty_ttm is never aliased/mutated.
ttm_complete = empty_ttm.copy()

# loop to compute a ttm for all the modes and generate one single ttm table in output
for row in modes_lut.itertuples():
    start_time = dt.datetime.now()
    mode = row.Mode
    transit_mode = row.Transit_mode
    leg_mode = row.Leg_mode
    # creating the list of objects for the transport_modes parameter
    transport_mode = list_making(transit_mode, leg_mode)

    print(
        "The current mode is:",
        mode,
        ", transit is:",
        transit_mode,
        ", transport var is:",
        transport_mode,
    )
    ttm_computer = TravelTimeMatrixComputer(
        transport_network,
        origins=origins,
        destinations=destinations,
        departure=dt.datetime.strptime(date_time, "%Y,%m,%d,%H,%M"),
        # max_time = max_time,
        speed_walking=walking_speed,
        speed_cycling=cycling_speed,
        transport_modes=transport_mode,
    )

    ttm = ttm_computer.compute_travel_times()
    # renaming the auto-generated 'travel_time' column to 'time_{mode}'
    ttm = ttm.rename(columns={"travel_time": f"time_{mode}"})
    # sanity check that the ttm actually computed something for this mode
    # (the original `ttm.isna().sum()` was a discarded no-op expression)
    assert ttm[f"time_{mode}"].notna().any(), f"no travel times computed for {mode}"
    # merge into the table of all possible O-D pairs, adding one travel time
    # column per mode (key columns are named identically on both sides)
    ttm_complete = ttm_complete.merge(ttm, how="outer", on=["from_id", "to_id"])

    print("finished calculating ttm for mode", mode)
    end_time = dt.datetime.now()  # was bare datetime.now(); use dt alias consistently
    print("Duration for", mode, ": {}".format(end_time - start_time))
The current mode is: transit , transit is: TransitMode.TRANSIT , transport var is: [<TransitMode.TRANSIT: <java object 'com.conveyal.r5.api.util.TransitModes'>>, <LegMode.WALK: <java object 'com.conveyal.r5.api.util.LegMode'>>]
finished calculating ttm for mode transit
Duration for transit : 0:07:34.098400
The current mode is: car , transit is:  , transport var is: ['', <LegMode.CAR: <java object 'com.conveyal.r5.api.util.LegMode'>>]
finished calculating ttm for mode car
Duration for car : 0:21:01.904903
The current mode is: bicycle , transit is:  , transport var is: ['', <LegMode.BICYCLE: <java object 'com.conveyal.r5.api.util.LegMode'>>]
finished calculating ttm for mode bicycle
Duration for bicycle : 0:16:26.882727
The current mode is: walk , transit is:  , transport var is: ['', <LegMode.WALK: <java object 'com.conveyal.r5.api.util.LegMode'>>]
finished calculating ttm for mode walk
Duration for walk : 0:03:23.352848
/usr/local/anaconda3/envs/demoland_r5/lib/python3.9/site-packages/r5py/r5/regional_task.py:224: RuntimeWarning: Departure time 2023-01-19 09:30:00 is outside of the time range covered by currently loaded GTFS data sets.
  warnings.warn(
ttm_complete.head()  # preview of the combined multi-mode travel time matrix
from_id to_id time_transit time_car time_bicycle time_walk
0 E00041377 E00041377 0.0 0 0.0 0.0
1 E00041377 E00041435 31.0 12 37.0 99.0
2 E00041377 E00041745 32.0 11 25.0 63.0
3 E00041377 E00041432 43.0 16 39.0 107.0
4 E00041377 E00041742 33.0 12 24.0 60.0
# # saving ttm in output
# ttm_complete.to_parquet(f"{data_folder}/processed/accessibility/ttm_complete.parquet")

A.3 2. Accessibility calculation

Using the jamaps/tracc package.

A.4 Accessibility to jobs

ttm_jobs = ttm_complete.copy(
    deep=True
)  # work on a deep copy: the steps below add columns, but we want to keep the original matrix intact

# generate tracc cost object wrapping the travel time matrix
ttm_jobs_tracc = tracc.costs(ttm_jobs)

modes_list = ["transit", "car", "bicycle", "walk"]

# result container, one row per origin OA ('id'); the loop below merges in
# one potential-accessibility column per mode
acc_pot_jobs = origins[["id"]]

for m in modes_list:
    # generate variable names to be used in the tracc function below
    cost_name = "time_" + m
    travel_costs_ids = ["from_id", "to_id"]
    supplyID = "wpz11cd"  # join key — must match the WPZ code column in the jobs CSV
    impedence_param = 15  # value for impedence function, to be changed as needed
    impedence_param_string = str(impedence_param)
    cost_output = "cum_" + impedence_param_string + "_" + m
    acc_column_name = "pot_cum_acc_" + impedence_param_string + "_" + m
    opportunity = "pop"  # opportunities column in the jobs CSV
    # Computing impedance function based on a 15 minute travel time threshold:
    # flags each O-D pair 1 if within the threshold, 0 otherwise.
    # (note: "impedence" is the spelling used by the tracc API)
    ttm_jobs_tracc.impedence_calc(
        cost_column=cost_name,
        impedence_func="cumulative",
        impedence_func_params=impedence_param,  # to calculate n of jobs in n min threshold
        output_col_name=cost_output,
        prune_output=False,
    )

    # Setting up the accessibility object. This includes joining the destination data to the travel time data
    acc_jobs = tracc.accessibility(
        travelcosts_df=ttm_jobs_tracc.data,
        supply_df=jobs_per_wpz_df,
        travelcosts_ids=travel_costs_ids,
        supply_ids=supplyID,
    )
    acc_jobs.data.head()

    # Measuring potential accessibility to jobs, using a 15 minute cumulative impedance function
    # acc_pot_jobs = acc_jobs.potential(
    #         opportunity = "pop",
    #         impedence = cost_output,
    #         output_col_name= "pot_acc_" + cost_output
    #         )
    # the function above would overwrite its output column at every loop
    # iteration, so we reproduce the same computation (from the tracc
    # documentation) with a per-mode column name instead:
    acc_jobs.data[acc_column_name] = (
        acc_jobs.data[opportunity] * acc_jobs.data[cost_output]
    )
    # sum the opportunity-weighted indicator over all destinations per origin
    group_sum_bymode_acc = acc_jobs.data.groupby(acc_jobs.data[travel_costs_ids[0]])[
        [acc_column_name]
    ].sum()
    # attach this mode's column to the per-OA results table
    acc_pot_jobs = acc_pot_jobs.merge(
        group_sum_bymode_acc, how="outer", left_on="id", right_on="from_id"
    )
acc_jobs.data.head()
from_id to_id time_transit time_car time_bicycle time_walk cum_15_transit cum_15_car cum_15_bicycle cum_15_walk wpz11cd pop pot_cum_acc_15_walk
0 E00041377 E00041377 0.0 0 0.0 0.0 1 1 1 1 NaN NaN NaN
1 E00041377 E00041435 31.0 12 37.0 99.0 0 1 0 0 NaN NaN NaN
2 E00041377 E00041745 32.0 11 25.0 63.0 0 1 0 0 NaN NaN NaN
3 E00041377 E00041432 43.0 16 39.0 107.0 0 0 0 0 NaN NaN NaN
4 E00041377 E00041742 33.0 12 24.0 60.0 0 1 0 0 NaN NaN NaN
acc_pot_jobs.head()  # one potential-accessibility column per mode, per origin OA
id pot_cum_acc_15_transit pot_cum_acc_15_car pot_cum_acc_15_bicycle pot_cum_acc_15_walk
0 E00041377 32139.0 318618.0 107910.0 11817.0
1 E00041435 4839.0 164613.0 8649.0 3814.0
2 E00041745 865.0 208472.0 9597.0 865.0
3 E00041432 2086.0 67634.0 11214.0 2086.0
4 E00041742 865.0 170808.0 10267.0 615.0
# saving output to external file

A.5 Accessibility to greenspace

# edit the greenspace layers
# both layers carry an 'id' column, which would clash once entrances and
# sites are joined later on, so give each one a distinctive name
gs_entrances.columns  # ['id', 'accessType', 'refToGreenspaceSite', 'geometry']
gs_entrances = gs_entrances.rename(columns={"id": "id_entrance"})
gs_sites.columns  # ['id', 'function', 'geometry']
gs_sites = gs_sites.rename(columns={"id": "id_site"})

# compute each site's area (layer kept in EPSG:27700, a projected CRS)
gs_sites["area_m2"] = gs_sites.geometry.area
gs_entrances.head()
gs_sites.head()
gs_sites.explore(column="area_m2", cmap="plasma", scheme="NaturalBreaks", k=10)
Make this Notebook Trusted to load map: File -> Trust Notebook
gs_entrances.head()  # check the renamed 'id_entrance' column
id_entrance accessType refToGreenspaceSite geometry
0 idD93E3AB6-BDCE-483D-B3CF-4242FA90A0B7 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55733 55.03322)
1 id951F323D-8E88-4A5B-B9A4-37E0D69DD870 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.56184 55.03333)
2 id0E14522B-427F-47C1-B043-BC3847ABE673 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.56197 55.03340)
3 id0FECA8F4-6053-4147-A11D-62B01EC6C135 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55989 55.03344)
4 id1BED7A99-E143-48C3-90CE-B7227E820454 Pedestrian idE56DE6C0-48DC-13A9-E053-AAEFA00A0D0E POINT (-1.55988 55.03359)
gs_sites.head()  # check the renamed 'id_site' column and the new 'area_m2'
id_site function geometry area_m2
0 idE56DE6D8-CA9A-13A9-E053-AAEFA00A0D0E Play Space MULTIPOLYGON (((440767.260 552692.600, 440777.... 1560.70565
1 idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E Religious Grounds MULTIPOLYGON (((440761.280 552942.510, 440753.... 1966.87245
2 idE56DE6D8-C9BD-13A9-E053-AAEFA00A0D0E Religious Grounds MULTIPOLYGON (((440968.500 552987.220, 440983.... 8135.95125
3 idE56DE6D8-8F64-13A9-E053-AAEFA00A0D0E Religious Grounds MULTIPOLYGON (((439560.480 560021.050, 439578.... 2868.07275
4 idE56DE6D8-8F65-13A9-E053-AAEFA00A0D0E Cemetery MULTIPOLYGON (((439858.700 560473.170, 439817.... 153540.65735
# associate each park's area to its entrances; the right join also keeps
# sites that have no mapped entrance
gs_entrances_with_parkarea = gs_entrances[["id_entrance", "refToGreenspaceSite"]].merge(
    gs_sites[["id_site", "function", "area_m2"]],
    left_on="refToGreenspaceSite",
    right_on="id_site",
    how="right",
)
gs_entrances_with_parkarea.head()
id_entrance refToGreenspaceSite id_site function area_m2
0 idCAC0A6B3-0FDB-446D-8E36-700AF2CC1256 idE56DE6D8-CA9A-13A9-E053-AAEFA00A0D0E idE56DE6D8-CA9A-13A9-E053-AAEFA00A0D0E Play Space 1560.70565
1 idCE043231-4C15-4265-A370-2D70261224C7 idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E Religious Grounds 1966.87245
2 id379B3089-2FF5-4BD3-B695-9B7DA915FB02 idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E Religious Grounds 1966.87245
3 id7AE0057A-2F40-43F3-970E-A517BBC99804 idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E Religious Grounds 1966.87245
4 idE5DAAC5C-29B5-49A0-BB46-62C78F46BA6C idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E idE56DE6D8-C9DE-13A9-E053-AAEFA00A0D0E Religious Grounds 1966.87245
ttm_complete.head()  # the combined travel time matrix, still untouched
from_id to_id time_transit time_car time_bicycle time_walk
0 E00041377 E00041377 0.0 0 0.0 0.0
1 E00041377 E00041435 31.0 12 37.0 99.0
2 E00041377 E00041745 32.0 11 25.0 63.0
3 E00041377 E00041432 43.0 16 39.0 107.0
4 E00041377 E00041742 33.0 12 24.0 60.0
ttm_greenspace = (
    ttm_complete.copy()
)  # work on a copy: the steps below add columns, but we want to keep the original matrix intact


# attach each destination's park area to the travel time matrix; only
# entrance rows match, all other destination types get NaN in the new columns
ttm_gs_with_area = pd.merge(
    ttm_greenspace,
    gs_entrances_with_parkarea[["id_entrance", "refToGreenspaceSite", "area_m2"]],
    left_on="to_id",
    right_on="id_entrance",
    how="left",
)
# generate tracc cost object
ttm_gs_tracc = tracc.costs(ttm_gs_with_area)

modes_list = ["transit", "car", "bicycle", "walk"]

# empty dataframes to be filled up in the next for loop
# NOTE(review): acc_pot_gs appears unused after this point — candidate for removal
acc_pot_gs = origins[["id"]]
gs_acc = []  # one per-mode Series of accessible area, concatenated after the loop

# loop: per mode, flag O-D pairs within the travel time threshold, weight each
# entrance by its park's area, keep one (fastest) entrance per park, and sum
# the accessible park area per origin OA
for m in modes_list:
    # generate variable names to be used in the tracc function below
    cost_name = "time_" + m
    travel_costs_ids = ["from_id", "to_id"]
    impedence_param = 15  # value for impedence function, to be changed as needed
    impedence_param_string = str(impedence_param)
    # column names depend on the impedence function threshold
    # (dead locals `acc_column_name` and `opportunity` from the jobs loop removed)
    cost_output = "cum_" + impedence_param_string + "_" + m
    area_column_name = "area_" + impedence_param_string + "_" + m
    # Computing impedence function based on a 15 minute travel time threshold.
    # (note: "impedence" is the spelling used by the tracc API)
    ttm_gs_tracc.impedence_calc(
        cost_column=cost_name,
        impedence_func="cumulative",
        impedence_func_params=impedence_param,  # to calculate opportunities in X min threshold
        output_col_name=cost_output,
        prune_output=False,
    )
    print(ttm_gs_tracc.data.columns)
    # Setting up the accessibility metric. This needs to be done differently
    # for greenspace, as the opportunity is the cumulative sum of sites' areas
    # rather than a supply table.
    print("cost output is", cost_output)
    print("area column name is", area_column_name)
    # A. Weight each row's park area by the 0/1 within-threshold indicator.
    #    (filtering rows on the indicator instead does not work, because the
    #    filtered columns end up with different lengths per mode)
    ttm_gs_tracc.data[area_column_name] = (
        ttm_gs_tracc.data["area_m2"] * ttm_gs_tracc.data[cost_output]
    )
    ttm_gs_df = ttm_gs_tracc.data

    # B. Keep a single entrance per park: the one with the lowest travel time
    oneaccess_perpark = ttm_gs_df.sort_values(cost_name).drop_duplicates(
        ["from_id", "refToGreenspaceSite"]
    )
    # C. Metric = sum of accessible parks' areas per origin OA, for this mode
    gs_metric_per_mode = oneaccess_perpark.groupby(["from_id"])[
        area_column_name
    ].sum()
    gs_acc.append(gs_metric_per_mode)
# combine the per-mode Series into one dataframe, indexed by from_id
# (note: gs_acc changes type here, from list to DataFrame)
gs_acc = pd.concat(gs_acc, axis=1)
Index(['from_id', 'to_id', 'time_transit', 'time_car', 'time_bicycle',
       'time_walk', 'id_entrance', 'refToGreenspaceSite', 'area_m2',
       'cum_15_transit'],
      dtype='object')
cost output is cum_15_transit
area column name is area_15_transit
Index(['from_id', 'to_id', 'time_transit', 'time_car', 'time_bicycle',
       'time_walk', 'id_entrance', 'refToGreenspaceSite', 'area_m2',
       'cum_15_transit', 'area_15_transit', 'cum_15_car'],
      dtype='object')
cost output is cum_15_car
area column name is area_15_car
Index(['from_id', 'to_id', 'time_transit', 'time_car', 'time_bicycle',
       'time_walk', 'id_entrance', 'refToGreenspaceSite', 'area_m2',
       'cum_15_transit', 'area_15_transit', 'cum_15_car', 'area_15_car',
       'cum_15_bicycle'],
      dtype='object')
cost output is cum_15_bicycle
area column name is area_15_bicycle
Index(['from_id', 'to_id', 'time_transit', 'time_car', 'time_bicycle',
       'time_walk', 'id_entrance', 'refToGreenspaceSite', 'area_m2',
       'cum_15_transit', 'area_15_transit', 'cum_15_car', 'area_15_car',
       'cum_15_bicycle', 'area_15_bicycle', 'cum_15_walk'],
      dtype='object')
cost output is cum_15_walk
area column name is area_15_walk
ttm_gs_tracc.data.head()  # per-mode indicator (cum_*) and area (area_*) columns now present
from_id to_id time_transit time_car time_bicycle time_walk id_entrance refToGreenspaceSite area_m2 cum_15_transit area_15_transit cum_15_car area_15_car cum_15_bicycle area_15_bicycle cum_15_walk area_15_walk
0 E00041377 E00041377 0.0 0 0.0 0.0 NaN NaN NaN 1 NaN 1 NaN 1 NaN 1 NaN
1 E00041377 E00041435 31.0 12 37.0 99.0 NaN NaN NaN 0 NaN 1 NaN 0 NaN 0 NaN
2 E00041377 E00041745 32.0 11 25.0 63.0 NaN NaN NaN 0 NaN 1 NaN 0 NaN 0 NaN
3 E00041377 E00041432 43.0 16 39.0 107.0 NaN NaN NaN 0 NaN 0 NaN 0 NaN 0 NaN
4 E00041377 E00041742 33.0 12 24.0 60.0 NaN NaN NaN 0 NaN 1 NaN 0 NaN 0 NaN

Exporting the results to output files

# export the per-OA accessibility tables (one column per mode)
acc_pot_jobs.to_csv(
    f"{data_folder}/processed/accessibility/acc_jobs_allmodes_15min_tynewear.csv"
)
gs_acc.to_csv(
    f"{data_folder}/processed/accessibility/acc_greenspace_allmodes_15min_tynewear.csv"
)

Plotting results

# OA polygons for choropleth mapping, reprojected to match the results
oas_boundaries = gpd.read_file(oas_file, layer="OA_TyneWear")
oas_boundaries_wgs84 = oas_boundaries.to_crs("epsg:4326")
# join job accessibility onto the polygons
# NOTE(review): assumes the boundaries layer has a 'geo_code' column holding OA codes — confirm
oas_boundaries_jobs = oas_boundaries_wgs84.merge(
    acc_pot_jobs, left_on="geo_code", right_on="id", how="right"
)
# static map: job accessibility by transit
oas_boundaries_jobs.plot(
    "pot_cum_acc_15_transit", cmap="plasma", scheme="NaturalBreaks", k=10
)
# interactive maps: job accessibility by car and by transit
oas_boundaries_jobs.explore(
    column="pot_cum_acc_15_car", cmap="plasma", scheme="NaturalBreaks", k=10
)
oas_boundaries_jobs.explore(
    column="pot_cum_acc_15_transit", cmap="plasma", scheme="NaturalBreaks", k=10
)
# join greenspace accessibility (gs_acc is indexed by from_id after the concat)
oas_boundaries_metric = oas_boundaries_wgs84.merge(
    gs_acc, left_on="geo_code", right_on="from_id", how="right"
)
# interactive maps: accessible greenspace area by transit and by car
oas_boundaries_metric.explore(
    column="area_15_transit", cmap="plasma", scheme="NaturalBreaks", k=10
)
oas_boundaries_metric.explore(
    column="area_15_car", cmap="plasma", scheme="NaturalBreaks", k=10
)