Disable parsing of JSONAssociations as they are not currently used within OwlBoard

Fred Boniface 2024-04-10 21:47:08 +01:00
parent 3883031e04
commit fea7a5b831
2 changed files with 14 additions and 7 deletions


@@ -72,7 +72,6 @@ func parseCifDataStream(dataStream io.ReadCloser) (*parsedData, error) {
         return nil, errors.New("unable to parse nil pointer")
     }
     // Initialise data structures
     var parsed parsedData
-    parsed.assoc = make([]upstreamApi.JsonAssociationV1, 0)
     parsed.sched = make([]upstreamApi.JsonScheduleV1, 0)
@@ -99,12 +98,9 @@ func parseCifDataStream(dataStream io.ReadCloser) (*parsedData, error) {
             }
             parsed.header = timetable
         case "JsonAssociationV1":
-            var association upstreamApi.JsonAssociationV1
-            if err := json.Unmarshal(value, &association); err != nil {
-                log.Msg.Error("Error decoding JSONAssociationV1 object", zap.Error(err))
-                continue
-            }
-            parsed.assoc = append(parsed.assoc, association)
+            // Association data is not currently used
+            // but may be used in the future
+            continue
         case "JsonScheduleV1":
             var schedule upstreamApi.JsonScheduleV1
             if err := json.Unmarshal(value, &schedule); err != nil {

cif/readme.md Normal file

@@ -0,0 +1,11 @@
# package cif
This package follows a similar pattern to `package corpus`.
First, `CheckCorpus()` retrieves CIF metadata from the database and determines whether an update is required and, if so, which type of update.
Then one of the update functions is called to run through the update process. There are two update types: 'full' and 'update'. A 'full' update drops the entire timetable collection and rebuilds it from a full CIF download; an 'update' downloads the CIF update files for the specified days and applies them.
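A minimal sketch of that check-then-dispatch flow follows. The signature and body of `CheckCorpus` are assumptions for illustration, and `runFullUpdate`/`runDeltaUpdate` are hypothetical names, not the package's actual identifiers:

```go
package cif

import "log"

// The two update modes described above.
type updateType int

const (
	updateNone  updateType = iota
	updateFull             // drop the timetable collection, rebuild from a full CIF download
	updateDelta            // fetch and apply daily CIF update files
)

// CheckCorpus is the metadata check mentioned above; its signature and
// stub body here are assumptions for illustration only.
func CheckCorpus() (updateType, error) { return updateDelta, nil }

func runFullUpdate() error  { return nil } // hypothetical placeholder
func runDeltaUpdate() error { return nil } // hypothetical placeholder

// Update dispatches to whichever update routine CheckCorpus selects.
func Update() error {
	kind, err := CheckCorpus()
	if err != nil {
		return err
	}
	switch kind {
	case updateFull:
		return runFullUpdate()
	case updateDelta:
		return runDeltaUpdate()
	default:
		log.Println("CIF timetable data is already up to date")
		return nil
	}
}
```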
Downloads are handled by `package nrod`, which returns an `io.ReadCloser` that is passed to the parsing function.
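A rough sketch of that handoff, assuming the stream is a sequence of JSON objects keyed by record type (as in the parsing code above); `parseStream` and its body are illustrative, not the package's actual implementation:

```go
package cif

import (
	"encoding/json"
	"errors"
	"io"
)

// parseStream consumes the io.ReadCloser returned by the download layer,
// decoding one JSON object per record and switching on the single key
// that names the record type.
func parseStream(dataStream io.ReadCloser) error {
	if dataStream == nil {
		return errors.New("unable to parse nil pointer")
	}
	defer dataStream.Close()

	dec := json.NewDecoder(dataStream)
	for {
		var record map[string]json.RawMessage
		if err := dec.Decode(&record); err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		for key, value := range record {
			switch key {
			case "JsonScheduleV1":
				_ = value // unmarshal into upstreamApi.JsonScheduleV1 here
			case "JsonAssociationV1":
				// Association data is not currently used
				continue
			}
		}
	}
	return nil
}
```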
Currently the parsing function returns a `parsedData` pointer; this uses significant memory due to the size of a full CIF download (often around 4.5 GB). The intention is to use a worker pool to handle the data instead.
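A hedged sketch of what that worker pool might look like; the bounded channel keeps only a window of records in memory at once, and `handleRecord` is a hypothetical stand-in for the eventual database write:

```go
package cif

import (
	"encoding/json"
	"sync"
)

// workerPool drains raw JSON records from a channel using a fixed number
// of goroutines, so the full download never has to sit in memory at once.
func workerPool(records <-chan json.RawMessage, workers int, handleRecord func(json.RawMessage) error) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for rec := range records {
				if err := handleRecord(rec); err != nil {
					// real code would log and decide whether to abort
					continue
				}
			}
		}()
	}
	wg.Wait()
}
```

The producer side would be the streaming parser above, sending each decoded `json.RawMessage` into `records` (a buffered channel caps peak memory) instead of appending it to `parsedData`.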