2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
package jobspec
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
|
|
|
|
multierror "github.com/hashicorp/go-multierror"
|
|
|
|
"github.com/hashicorp/hcl"
|
|
|
|
"github.com/hashicorp/hcl/hcl/ast"
|
|
|
|
"github.com/hashicorp/nomad/api"
|
|
|
|
"github.com/mitchellh/mapstructure"
|
|
|
|
)
|
|
|
|
|
|
|
|
// parseGroups parses every "group" block from the job's HCL AST into
// api.TaskGroup values and appends them to result.TaskGroups.
//
// For each group it: rejects duplicate group names, validates the set of
// allowed keys, weakly decodes the scalar attributes into the TaskGroup,
// and then dispatches each structured sub-block (constraint, consul,
// affinity, restart, spread, network, reschedule, ephemeral_disk, update,
// migrate, meta, volume, scaling, task, vault, service) to its dedicated
// parser. Returns an error (prefixed with the group name where helpful)
// on the first failure.
func parseGroups(result *api.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) == 0 {
		return nil
	}

	// Go through each object and turn it into an actual result.
	collection := make([]*api.TaskGroup, 0, len(list.Items))
	seen := make(map[string]struct{})
	for _, item := range list.Items {
		// The group name is the first HCL key: `group "name" { ... }`.
		n := item.Keys[0].Token.Value().(string)

		// Make sure we haven't already found this
		if _, ok := seen[n]; ok {
			return fmt.Errorf("group '%s' defined more than once", n)
		}
		seen[n] = struct{}{}

		// We need this later
		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return fmt.Errorf("group '%s': should be an object", n)
		}

		// Check for invalid keys
		valid := []string{
			"count",
			"constraint",
			"consul",
			"affinity",
			"restart",
			"meta",
			"task",
			"ephemeral_disk",
			"update",
			"reschedule",
			"vault",
			"migrate",
			"spread",
			"shutdown_delay",
			"network",
			"service",
			"volume",
			"scaling",
			"stop_after_client_disconnect",
			"max_client_disconnect",
		}
		if err := checkHCLKeys(listVal, valid); err != nil {
			return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
		}

		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, item.Val); err != nil {
			return err
		}

		// Remove the keys that are handled by dedicated parsers below so
		// the weak decode only sees the scalar group attributes.
		delete(m, "constraint")
		delete(m, "consul")
		delete(m, "affinity")
		delete(m, "meta")
		delete(m, "task")
		delete(m, "restart")
		delete(m, "ephemeral_disk")
		delete(m, "update")
		delete(m, "vault")
		delete(m, "migrate")
		delete(m, "spread")
		delete(m, "network")
		delete(m, "service")
		delete(m, "volume")
		delete(m, "scaling")

		// Build the group with the basic decode
		var g api.TaskGroup
		g.Name = stringToPtr(n)
		// The duration hook lets string fields like "30s" decode into
		// time.Duration-typed group attributes.
		dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
			WeaklyTypedInput: true,
			Result:           &g,
		})

		if err != nil {
			return err
		}
		if err := dec.Decode(m); err != nil {
			return err
		}

		// Parse constraints
		if o := listVal.Filter("constraint"); len(o.Items) > 0 {
			if err := parseConstraints(&g.Constraints, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', constraint ->", n))
			}
		}

		// Parse consul
		if o := listVal.Filter("consul"); len(o.Items) > 0 {
			if err := parseConsul(&g.Consul, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', consul ->", n))
			}
		}

		// Parse affinities
		if o := listVal.Filter("affinity"); len(o.Items) > 0 {
			if err := parseAffinities(&g.Affinities, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', affinity ->", n))
			}
		}

		// Parse restart policy
		if o := listVal.Filter("restart"); len(o.Items) > 0 {
			if err := parseRestartPolicy(&g.RestartPolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', restart ->", n))
			}
		}

		// Parse spread
		if o := listVal.Filter("spread"); len(o.Items) > 0 {
			if err := parseSpread(&g.Spreads, o); err != nil {
				return multierror.Prefix(err, "spread ->")
			}
		}

		// Parse network
		if o := listVal.Filter("network"); len(o.Items) > 0 {
			networks, err := ParseNetwork(o)
			if err != nil {
				return err
			}
			// Groups carry at most one network block; wrap it in a slice
			// to match the api.TaskGroup field shape.
			g.Networks = []*api.NetworkResource{networks}
		}

		// Parse reschedule policy
		if o := listVal.Filter("reschedule"); len(o.Items) > 0 {
			if err := parseReschedulePolicy(&g.ReschedulePolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', reschedule ->", n))
			}
		}
		// Parse ephemeral disk
		if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 {
			// NOTE(review): this placeholder is immediately replaced by
			// parseEphemeralDisk, which assigns a fresh value through the
			// pointer-to-pointer.
			g.EphemeralDisk = &api.EphemeralDisk{}
			if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n))
			}
		}

		// If we have an update strategy, then parse that
		if o := listVal.Filter("update"); len(o.Items) > 0 {
			if err := parseUpdate(&g.Update, o); err != nil {
				return multierror.Prefix(err, "update ->")
			}
		}

		// If we have a migration strategy, then parse that
		if o := listVal.Filter("migrate"); len(o.Items) > 0 {
			if err := parseMigrate(&g.Migrate, o); err != nil {
				return multierror.Prefix(err, "migrate ->")
			}
		}

		// Parse out meta fields. These are in HCL as a list so we need
		// to iterate over them and merge them.
		if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
			for _, o := range metaO.Elem().Items {
				var m map[string]interface{}
				if err := hcl.DecodeObject(&m, o.Val); err != nil {
					return err
				}
				if err := mapstructure.WeakDecode(m, &g.Meta); err != nil {
					return err
				}
			}
		}

		// Parse any volume declarations
		if o := listVal.Filter("volume"); len(o.Items) > 0 {
			if err := parseVolumes(&g.Volumes, o); err != nil {
				return multierror.Prefix(err, "volume ->")
			}
		}

		// Parse scaling policy
		if o := listVal.Filter("scaling"); len(o.Items) > 0 {
			if err := parseGroupScalingPolicy(&g.Scaling, o); err != nil {
				return multierror.Prefix(err, "scaling ->")
			}
		}

		// Parse tasks
		if o := listVal.Filter("task"); len(o.Items) > 0 {
			if err := parseTasks(&g.Tasks, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n))
			}
		}

		// If we have a vault block, then parse that
		if o := listVal.Filter("vault"); len(o.Items) > 0 {
			// Defaults mirror the documented vault stanza behavior: token
			// exposed via env and secrets file, restart on change.
			tgVault := &api.Vault{
				Env:         boolToPtr(true),
				DisableFile: boolToPtr(false),
				ChangeMode:  stringToPtr("restart"),
			}

			if err := parseVault(tgVault, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
			}

			// Go through the tasks and if they don't have a Vault block, set it
			for _, task := range g.Tasks {
				if task.Vault == nil {
					task.Vault = tgVault
				}
			}
		}

		if o := listVal.Filter("service"); len(o.Items) > 0 {
			if err := parseGroupServices(&g, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s',", n))
			}
		}
		collection = append(collection, &g)
	}

	result.TaskGroups = append(result.TaskGroups, collection...)
	return nil
}
|
|
|
|
|
2021-11-03 17:49:32 +00:00
|
|
|
func parseConsul(result **api.Consul, list *ast.ObjectList) error {
|
|
|
|
list = list.Elem()
|
|
|
|
if len(list.Items) > 1 {
|
|
|
|
return fmt.Errorf("only one 'consul' block allowed")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get our consul object
|
|
|
|
obj := list.Items[0]
|
|
|
|
|
|
|
|
// Check for invalid keys
|
|
|
|
valid := []string{
|
|
|
|
"namespace",
|
|
|
|
}
|
|
|
|
if err := checkHCLKeys(obj.Val, valid); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var m map[string]interface{}
|
|
|
|
if err := hcl.DecodeObject(&m, obj.Val); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var consul api.Consul
|
|
|
|
if err := mapstructure.WeakDecode(m, &consul); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
*result = &consul
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error {
|
|
|
|
list = list.Elem()
|
|
|
|
if len(list.Items) > 1 {
|
|
|
|
return fmt.Errorf("only one 'ephemeral_disk' block allowed")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get our ephemeral_disk object
|
|
|
|
obj := list.Items[0]
|
|
|
|
|
|
|
|
// Check for invalid keys
|
|
|
|
valid := []string{
|
|
|
|
"sticky",
|
|
|
|
"size",
|
|
|
|
"migrate",
|
|
|
|
}
|
2020-09-03 11:34:04 +00:00
|
|
|
if err := checkHCLKeys(obj.Val, valid); err != nil {
|
2019-08-09 19:18:53 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var m map[string]interface{}
|
|
|
|
if err := hcl.DecodeObject(&m, obj.Val); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var ephemeralDisk api.EphemeralDisk
|
|
|
|
if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
*result = &ephemeralDisk
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error {
|
|
|
|
list = list.Elem()
|
|
|
|
if len(list.Items) > 1 {
|
|
|
|
return fmt.Errorf("only one 'restart' block allowed")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get our job object
|
|
|
|
obj := list.Items[0]
|
|
|
|
|
|
|
|
// Check for invalid keys
|
|
|
|
valid := []string{
|
|
|
|
"attempts",
|
|
|
|
"interval",
|
|
|
|
"delay",
|
|
|
|
"mode",
|
2023-07-28 18:54:00 +00:00
|
|
|
"render_templates",
|
2019-08-09 19:18:53 +00:00
|
|
|
}
|
2020-09-03 11:34:04 +00:00
|
|
|
if err := checkHCLKeys(obj.Val, valid); err != nil {
|
2019-08-09 19:18:53 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var m map[string]interface{}
|
|
|
|
if err := hcl.DecodeObject(&m, obj.Val); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var result api.RestartPolicy
|
|
|
|
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
|
|
|
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
|
|
|
|
WeaklyTypedInput: true,
|
|
|
|
Result: &result,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := dec.Decode(m); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
*final = &result
|
|
|
|
return nil
|
|
|
|
}
|
2019-07-25 14:42:11 +00:00
|
|
|
|
2019-08-12 14:22:27 +00:00
|
|
|
func parseVolumes(out *map[string]*api.VolumeRequest, list *ast.ObjectList) error {
|
2020-03-23 17:55:26 +00:00
|
|
|
hcl.DecodeObject(out, list)
|
2019-07-25 14:42:11 +00:00
|
|
|
|
2020-03-23 17:55:26 +00:00
|
|
|
for k, v := range *out {
|
2020-09-03 11:34:04 +00:00
|
|
|
err := unusedKeys(v)
|
2019-07-25 14:42:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-03-23 17:55:26 +00:00
|
|
|
// This is supported by `hcl:",key"`, but that only works if we start at the
|
|
|
|
// parent ast.ObjectItem
|
|
|
|
v.Name = k
|
2019-07-25 14:42:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2020-01-15 20:51:57 +00:00
|
|
|
|
2020-09-09 22:30:40 +00:00
|
|
|
func parseGroupScalingPolicy(out **api.ScalingPolicy, list *ast.ObjectList) error {
|
2020-01-15 20:51:57 +00:00
|
|
|
if len(list.Items) > 1 {
|
|
|
|
return fmt.Errorf("only one 'scaling' block allowed")
|
|
|
|
}
|
2020-09-09 22:30:40 +00:00
|
|
|
item := list.Items[0]
|
|
|
|
if len(item.Keys) != 0 {
|
|
|
|
return fmt.Errorf("task group scaling policy should not have a name")
|
|
|
|
}
|
|
|
|
p, err := parseScalingPolicy(item)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-01-15 20:51:57 +00:00
|
|
|
|
2020-09-09 22:30:40 +00:00
|
|
|
// group-specific validation
|
|
|
|
if p.Max == nil {
|
|
|
|
return fmt.Errorf("missing 'max'")
|
|
|
|
}
|
|
|
|
if p.Type == "" {
|
|
|
|
p.Type = "horizontal"
|
|
|
|
} else if p.Type != "horizontal" {
|
|
|
|
return fmt.Errorf("task group scaling policy had invalid type: %q", p.Type)
|
|
|
|
}
|
|
|
|
*out = p
|
|
|
|
return nil
|
|
|
|
}
|
2020-01-15 20:51:57 +00:00
|
|
|
|
2020-09-09 22:30:40 +00:00
|
|
|
func parseScalingPolicy(item *ast.ObjectItem) (*api.ScalingPolicy, error) {
|
2020-01-16 15:32:00 +00:00
|
|
|
// We need this later
|
|
|
|
var listVal *ast.ObjectList
|
2020-09-09 22:30:40 +00:00
|
|
|
if ot, ok := item.Val.(*ast.ObjectType); ok {
|
2020-01-16 15:32:00 +00:00
|
|
|
listVal = ot.List
|
|
|
|
} else {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, fmt.Errorf("should be an object")
|
2020-01-16 15:32:00 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 20:51:57 +00:00
|
|
|
valid := []string{
|
2020-03-19 14:30:14 +00:00
|
|
|
"min",
|
|
|
|
"max",
|
2020-01-15 20:51:57 +00:00
|
|
|
"policy",
|
|
|
|
"enabled",
|
2020-09-09 22:30:40 +00:00
|
|
|
"type",
|
2020-01-15 20:51:57 +00:00
|
|
|
}
|
2020-09-09 22:30:40 +00:00
|
|
|
if err := checkHCLKeys(item.Val, valid); err != nil {
|
|
|
|
return nil, err
|
2020-01-15 20:51:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var m map[string]interface{}
|
2020-09-09 22:30:40 +00:00
|
|
|
if err := hcl.DecodeObject(&m, item.Val); err != nil {
|
|
|
|
return nil, err
|
2020-01-15 20:51:57 +00:00
|
|
|
}
|
2020-01-16 15:32:00 +00:00
|
|
|
delete(m, "policy")
|
2020-01-15 20:51:57 +00:00
|
|
|
|
|
|
|
var result api.ScalingPolicy
|
|
|
|
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
|
|
|
WeaklyTypedInput: true,
|
|
|
|
Result: &result,
|
|
|
|
})
|
|
|
|
if err != nil {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, err
|
2020-01-15 20:51:57 +00:00
|
|
|
}
|
|
|
|
if err := dec.Decode(m); err != nil {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, err
|
2020-07-04 19:05:50 +00:00
|
|
|
}
|
2020-01-15 20:51:57 +00:00
|
|
|
|
2020-01-16 15:32:00 +00:00
|
|
|
// If we have policy, then parse that
|
|
|
|
if o := listVal.Filter("policy"); len(o.Items) > 0 {
|
2020-04-23 12:37:45 +00:00
|
|
|
if len(o.Elem().Items) > 1 {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, fmt.Errorf("only one 'policy' block allowed per 'scaling' block")
|
2020-04-23 12:37:45 +00:00
|
|
|
}
|
|
|
|
p := o.Elem().Items[0]
|
|
|
|
var m map[string]interface{}
|
|
|
|
if err := hcl.DecodeObject(&m, p.Val); err != nil {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, err
|
2020-04-23 12:37:45 +00:00
|
|
|
}
|
|
|
|
if err := mapstructure.WeakDecode(m, &result.Policy); err != nil {
|
2020-09-09 22:30:40 +00:00
|
|
|
return nil, err
|
2020-01-16 15:32:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-09 22:30:40 +00:00
|
|
|
return &result, nil
|
2020-01-15 20:51:57 +00:00
|
|
|
}
|