diff --git a/Makefile b/Makefile index 8712a40cc7..9bb1eb3c66 100644 --- a/Makefile +++ b/Makefile @@ -236,7 +236,7 @@ ci-test-watch: ginkgo go-test: . ./env.test.sh && $(TIMING_CMD) go test $(GOTEST_FLAGS) $(GOTEST_PKGS) -go-ci-test: GOTEST_FLAGS += -count=1 -race -shuffle=on -cover +go-ci-test: override GOTEST_FLAGS += -count=1 -race -shuffle=on -cover go-ci-test: GOTEST_PKGS = ./... go-ci-test: go-test diff --git a/alerts/client.go b/alerts/client.go index dcaafce96c..fae857a660 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -3,11 +3,12 @@ package alerts import ( "context" "net/http" + "time" "github.com/kelseyhightower/envconfig" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/errors" platformlog "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" "github.com/tidepool-org/platform/platform" @@ -16,22 +17,20 @@ import ( // Client for managing alerts configs. type Client struct { - client PlatformClient - logger platformlog.Logger - tokenProvider auth.ServerSessionTokenProvider + client PlatformClient + logger platformlog.Logger } // NewClient builds a client for interacting with alerts API endpoints. // // If no logger is provided, a null logger is used. -func NewClient(client PlatformClient, tokenProvider auth.ServerSessionTokenProvider, logger platformlog.Logger) *Client { +func NewClient(client PlatformClient, logger platformlog.Logger) *Client { if logger == nil { logger = null.NewLogger() } return &Client{ - client: client, - logger: logger, - tokenProvider: tokenProvider, + client: client, + logger: logger, } } @@ -44,34 +43,69 @@ type PlatformClient interface { // request performs common operations before passing a request off to the // underlying platform.Client. 
-func (c *Client) request(ctx context.Context, method, url string, body any) error { +func (c *Client) request(ctx context.Context, method, url string, reqBody, resBody any) error { // Platform's client.Client expects a logger to exist in the request's // context. If it doesn't exist, request processing will panic. loggingCtx := platformlog.NewContextWithLogger(ctx, c.logger) - // Make sure the auth token is injected into the request's headers. - return c.requestWithAuth(loggingCtx, method, url, body) -} - -// requestWithAuth injects an auth token before calling platform.Client.RequestData. -// -// At time of writing, this is the only way to inject credentials into -// platform.Client. It might be nice to be able to use a mutator, but the auth -// is specifically handled by the platform.Client via the context field, and -// if left blank, platform.Client errors. -func (c *Client) requestWithAuth(ctx context.Context, method, url string, body any) error { - return c.client.RequestData(auth.NewContextWithServerSessionTokenProvider(ctx, c.tokenProvider), method, url, nil, body, nil) + return c.client.RequestData(loggingCtx, method, url, nil, reqBody, resBody) } // Upsert updates cfg if it exists or creates it if it doesn't. func (c *Client) Upsert(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodPost, url, cfg) + return c.request(ctx, http.MethodPost, url, cfg, nil) } // Delete the alerts config. func (c *Client) Delete(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodDelete, url, nil) + return c.request(ctx, http.MethodDelete, url, nil, nil) +} + +// Get a user's alerts configuration for the followed user. 
+func (c *Client) Get(ctx context.Context, followedUserID, userID string) (*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", userID, "alerts") + config := &Config{} + err := c.request(ctx, http.MethodGet, url, nil, config) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts config") + } + return config, nil +} + +// List the alerts configurations that follow the given user. +// +// This method should only be called via an authenticated service session. +func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", "alerts") + configs := []*Config{} + err := c.request(ctx, http.MethodGet, url, nil, &configs) + if err != nil { + c.logger.Debugf("unable to request alerts configs list: %+v %T", err, err) + return nil, errors.Wrap(err, "Unable to request alerts configs list") + } + return configs, nil +} + +// OverdueCommunications are those that haven't communicated in some time. +// +// This method should only be called via an authenticated service session. +func (c *Client) OverdueCommunications(ctx context.Context) ([]LastCommunication, error) { + url := c.client.ConstructURL("v1", "users", "overdue_communications") + lastComms := []LastCommunication{} + err := c.request(ctx, http.MethodGet, url, nil, &lastComms) + if err != nil { + c.logger.Debugf("getting users overdue to communicate: \"%+v\" %T", err, err) + return nil, errors.Wrap(err, "Unable to list overdue communications") + } + return lastComms, nil +} + +// LastCommunication records the last time data was received from a user. +type LastCommunication struct { + UserID string `bson:"userId" json:"userId"` + DataSetID string `bson:"dataSetId" json:"dataSetId"` + LastReceivedDeviceData time.Time `bson:"lastReceivedDeviceData" json:"lastReceivedDeviceData"` } // ConfigLoader abstracts the method by which config values are loaded. 
diff --git a/alerts/client_test.go b/alerts/client_test.go index c5a771256f..cb647cbfa4 100644 --- a/alerts/client_test.go +++ b/alerts/client_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" @@ -16,10 +15,13 @@ import ( ) const testToken = "auth-me" +const testUserID = "test-user-id" +const testFollowedUserID = "test-followed-user-id" +const testDataSetID = "upid_000000000000" var _ = Describe("Client", func() { - var test404Server, test200Server *httptest.Server - var testAuthServer func(*string) *httptest.Server + var test404Server *httptest.Server + var test200Server func(string) *httptest.Server BeforeEach(func() { t := GinkgoT() @@ -28,72 +30,103 @@ var _ = Describe("Client", func() { test404Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) }) - test200Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - testAuthServer = func(token *string) *httptest.Server { + test200Server = func(resp string) *httptest.Server { return testServer(t, func(w http.ResponseWriter, r *http.Request) { - *token = r.Header.Get(auth.TidepoolSessionTokenHeaderKey) w.WriteHeader(http.StatusOK) + w.Write([]byte(resp)) }) } }) - Context("Delete", func() { - It("returns an error on non-200 responses", func() { + ItReturnsAnErrorOnNon200Responses := func(f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns an error on non-200 responses", func() { client, ctx := newAlertsClientTest(test404Server) - err := client.Delete(ctx, &Config{}) + err := f(ctx, client) Expect(err).Should(HaveOccurred()) Expect(err).To(MatchError(ContainSubstring("resource not found"))) }) + } - It("returns nil on success", func() { - client, ctx := 
newAlertsClientTest(test200Server) - err := client.Delete(ctx, &Config{}) - Expect(err).ShouldNot(HaveOccurred()) + ItReturnsANilErrorOnSuccess := func(resp string, f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns a nil error on success", func() { + client, ctx := newAlertsClientTest(test200Server(resp)) + err := f(ctx, client) + Expect(err).To(Succeed()) + }) + } + + Context("Delete", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) }) - It("injects an auth token", func() { - token := "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Delete(ctx, &Config{}) - Expect(token).To(Equal(testToken)) + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) }) }) Context("Upsert", func() { - It("returns an error on non-200 responses", func() { - client, ctx := newAlertsClientTest(test404Server) - err := client.Upsert(ctx, &Config{}) - Expect(err).Should(HaveOccurred()) - Expect(err).To(MatchError(ContainSubstring("resource not found"))) + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + }) + + Context("Get", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err }) - It("returns nil on success", func() { - client, ctx := newAlertsClientTest(test200Server) - err := client.Upsert(ctx, &Config{}) - Expect(err).ShouldNot(HaveOccurred()) + ret := `{ + "userId": "14ee703f-ca9b-4a6b-9ce3-41d886514e7f", + "followedUserId": "ce5863bc-cc0b-4177-97d7-e8de0c558820", + "uploadId": "upid_00000000000000000000000000000000" + }` + ItReturnsANilErrorOnSuccess(ret, func(ctx 
context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err }) + }) - It("injects an auth token", func() { - token := "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Upsert(ctx, &Config{}) - Expect(token).To(Equal(testToken)) + Context("List", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err + }) + + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err + }) + }) + + Context("OverdueCommunications", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.OverdueCommunications(ctx) + return err + }) + + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.OverdueCommunications(ctx) + return err }) }) }) func buildTestClient(s *httptest.Server) *Client { pCfg := &platform.Config{ - Config: &client.Config{ - Address: s.URL, - }, + Config: &client.Config{Address: s.URL}, + ServiceSecret: "auth-me", } - token := mockTokenProvider(testToken) pc, err := platform.NewClient(pCfg, platform.AuthorizeAsService) Expect(err).ToNot(HaveOccurred()) - client := NewClient(pc, token, null.NewLogger()) + client := NewClient(pc, null.NewLogger()) return client } @@ -101,14 +134,14 @@ func newAlertsClientTest(server *httptest.Server) (*Client, context.Context) { return buildTestClient(server), contextWithNullLogger() } -func contextWithNullLogger() context.Context { - return log.NewContextWithLogger(context.Background(), null.NewLogger()) +func contextWithNullLoggerDeluxe() (context.Context, log.Logger) { + lgr := null.NewLogger() + return log.NewContextWithLogger(context.Background(), lgr), lgr } -type mockTokenProvider string - -func (p mockTokenProvider) ServerSessionToken() (string, error) { - return string(p), nil +func 
contextWithNullLogger() context.Context { + ctx, _ := contextWithNullLoggerDeluxe() + return ctx } func testServer(t GinkgoTInterface, handler http.HandlerFunc) *httptest.Server { diff --git a/alerts/config.go b/alerts/config.go index 67f2b1d72c..823c2ee45f 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -6,10 +6,17 @@ import ( "bytes" "context" "encoding/json" + "os" + "slices" "time" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" "github.com/tidepool-org/platform/structure" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/user" @@ -17,9 +24,8 @@ import ( // Config wraps Alerts to include user relationships. // -// As a wrapper type, Config provides a clear demarcation of what a user -// controls (Alerts) and what is set by the service (the other values in -// Config). +// As a wrapper type, Config provides a clear demarcation of what a user controls (Alerts) +// and what is set by the service (the other values in Config). type Config struct { // UserID receives the configured alerts and owns this Config. UserID string `json:"userId" bson:"userId"` @@ -31,57 +37,251 @@ type Config struct { // UploadID identifies the device dataset for which these alerts apply. UploadID string `json:"uploadId" bson:"uploadId,omitempty"` - Alerts `bson:",inline,omitempty"` + // Alerts collects the user settings for each type of alert, and tracks their statuses. + Alerts `bson:"alerts,omitempty"` + + Activity `bson:"activity,omitempty" json:"activity,omitempty"` } -// Alerts models a user's desired alerts. 
+// Alerts is a wrapper to collect the user-modifiable parts of a Config. type Alerts struct { UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` - NoCommunication *NoCommunicationAlert `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` + NoCommunication *NoCommunicationAlert `bson:"noCommunication,omitempty" json:"noCommunication,omitempty"` +} + +type Activity struct { + UrgentLow AlertActivity `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` + Low AlertActivity `json:"low,omitempty" bson:"low,omitempty"` + High AlertActivity `json:"high,omitempty" bson:"high,omitempty"` + NotLooping AlertActivity `json:"notLooping,omitempty" bson:"notLooping,omitempty"` + NoCommunication AlertActivity `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` } func (c Config) Validate(validator structure.Validator) { validator.String("userID", &c.UserID).Using(user.IDValidator) validator.String("followedUserID", &c.FollowedUserID).Using(user.IDValidator) validator.String("uploadID", &c.UploadID).Exists().Using(data.SetIDValidator) - c.Alerts.Validate(validator) + if c.Alerts.UrgentLow != nil { + c.Alerts.UrgentLow.Validate(validator) + } + if c.Alerts.Low != nil { + c.Alerts.Low.Validate(validator) + } + if c.Alerts.High != nil { + c.Alerts.High.Validate(validator) + } + if c.Alerts.NotLooping != nil { + c.Alerts.NotLooping.Validate(validator) + } + if c.Alerts.NoCommunication != nil { + c.Alerts.NoCommunication.Validate(validator) + } +} + +// EvaluateData alerts in the context of the provided data. +// +// While this method, or the methods it calls, can fail, there's no point in returning an +// error. Instead errors are logged before continuing. 
This is to ensure that any possible +// alert that should be triggered, will be triggered. +func (c *Config) EvaluateData(ctx context.Context, gd []*Glucose, + dd []*DosingDecision) (*Notification, bool) { + + var n *Notification + var needsUpsert bool + + ul, low, high, nl := EvalResult{}, EvalResult{}, EvalResult{}, EvalResult{} + if c.Alerts.UrgentLow != nil && c.Alerts.UrgentLow.Enabled { + ul = c.Alerts.UrgentLow.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.UrgentLow.Update(ul.OutOfRange) + } + if c.Alerts.Low != nil && c.Alerts.Low.Enabled { + low = c.Alerts.Low.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.Low.Update(low.OutOfRange) + } + if c.Alerts.High != nil && c.Alerts.High.Enabled { + high = c.Alerts.High.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.High.Update(high.OutOfRange) + } + if c.Alerts.NotLooping != nil && c.Alerts.NotLooping.Enabled { + nl = c.Alerts.NotLooping.Evaluate(ctx, dd) + needsUpsert = needsUpsert || c.Activity.NotLooping.Update(nl.OutOfRange) + } + + if ul.OutOfRange { + if isReEval(c.Activity.UrgentLow.Sent, ul.NewestTime) { + return nil, needsUpsert + } + msg := genGlucoseThresholdMessage("below urgent low") + return c.newNotification(msg, &c.Activity.UrgentLow), needsUpsert + } + if low.OutOfRange { + if isReEval(c.Activity.Low.Sent, low.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.Low.Delay.Duration() + + if time.Since(low.Started) > delay { + repeat := c.Alerts.Low.Repeat + if !c.Activity.Low.IsSent() || mayRepeat(repeat, c.Activity.Low.Sent) { + msg := genGlucoseThresholdMessage("below low") + return c.newNotification(msg, &c.Activity.Low), needsUpsert + + } + } + return nil, needsUpsert + } + if high.OutOfRange { + if isReEval(c.Activity.High.Sent, high.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.High.Delay.Duration() + if time.Since(high.Started) > delay { + repeat := c.Alerts.High.Repeat + if !c.Activity.High.IsSent() || mayRepeat(repeat, 
c.Activity.High.Sent) { + msg := genGlucoseThresholdMessage("above high") + return c.newNotification(msg, &c.Activity.High), needsUpsert + } + } + } + if nl.OutOfRange { + // Because not looping doesn't use a threshold, re-evaluations aren't treated any + // differently. + delay := c.Alerts.NotLooping.Delay.Duration() + if delay == 0 { + delay = NotLoopingRepeat + } + if time.Since(c.Activity.NotLooping.Sent) > delay { + return c.newNotification(NotLoopingMessage, &c.Activity.NotLooping), needsUpsert + } + } + + return n, needsUpsert +} + +func mayRepeat(repeat DurationMinutes, lastSent time.Time) bool { + return repeat.Duration() > 0 && time.Since(lastSent) > repeat.Duration() +} + +func (c *Config) newNotification(msg string, act *AlertActivity) *Notification { + return &Notification{ + FollowedUserID: c.FollowedUserID, + RecipientUserID: c.UserID, + Message: msg, + Sent: func(t time.Time) { + if t.After(act.Sent) { + act.Sent = t + } + }, + } +} + +func (c Config) LoggerWithFields(lgr log.Logger) log.Logger { + return lgr.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + "dataSetID": c.UploadID, + }) +} + +func isReEval(t1, t2 time.Time) bool { + return t1.After(t2) } -func (a Alerts) Validate(validator structure.Validator) { - if a.UrgentLow != nil { - a.UrgentLow.Validate(validator) +func (c *Config) EvaluateNoCommunication(ctx context.Context, + lgr log.Logger, last time.Time) (*Notification, bool) { + + if c.Alerts.NoCommunication == nil || !c.Alerts.NoCommunication.Enabled { + return nil, false } - if a.Low != nil { - a.Low.Validate(validator) + + ctx = log.NewContextWithLogger(ctx, lgr) + nc := c.Alerts.NoCommunication.Evaluate(ctx, last) + needsUpsert := c.Activity.NoCommunication.Update(nc.OutOfRange) + delay := c.Alerts.NoCommunication.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay + } + if time.Since(nc.Started) > delay && time.Since(c.Activity.NoCommunication.Sent) > delay { + n := 
c.newNotification(NoCommunicationMessage, &c.Activity.NoCommunication) + return n, needsUpsert + } + return nil, needsUpsert +} + +// LongestDelay of the delays set on enabled alerts. +func (a Alerts) LongestDelay() time.Duration { + delays := []time.Duration{} + if a.Low != nil && a.Low.Enabled { + delays = append(delays, a.Low.Delay.Duration()) } - if a.High != nil { - a.High.Validate(validator) + if a.High != nil && a.High.Enabled { + delays = append(delays, a.High.Delay.Duration()) } - if a.NotLooping != nil { - a.NotLooping.Validate(validator) + if a.NotLooping != nil && a.NotLooping.Enabled { + delays = append(delays, a.NotLooping.Delay.Duration()) } - if a.NoCommunication != nil { - a.NoCommunication.Validate(validator) + if len(delays) == 0 { + return 0 } + return slices.Max(delays) } // Base describes the minimum specifics of a desired alert. type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` - // Repeat is measured in minutes. - // - // A value of 0 (the default) disables repeat notifications. - Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) - dur := b.Repeat.Duration() - validator.Duration("repeat", &dur).Using(validateRepeat) +} + +func (b Base) Evaluate(ctx context.Context, data []*Glucose) *Notification { + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") + } + return nil +} + +func (b Base) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. + lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + } + return lgr +} + +type AlertActivity struct { + // Triggered records the last time this alert was triggered. 
+ Triggered time.Time `json:"triggered" bson:"triggered"` + // Sent records the last time this alert was sent. + Sent time.Time `json:"sent" bson:"sent"` + // Resolved records the last time this alert was resolved. + Resolved time.Time `json:"resolved" bson:"resolved"` +} + +func (a AlertActivity) IsActive() bool { + return a.Triggered.After(a.Resolved) +} + +func (a AlertActivity) IsSent() bool { + return a.Sent.After(a.Triggered) +} + +func (a *AlertActivity) Update(outOfRange bool) bool { + changed := false + if outOfRange && !a.IsActive() { + a.Triggered = time.Now() + changed = true + } else if !outOfRange && a.IsActive() { + a.Resolved = time.Now() + changed = true + } + return changed } const ( @@ -110,7 +310,7 @@ type UrgentLowAlert struct { Base `bson:",inline"` // Threshold is compared the current value to determine if an alert should // be triggered. - Threshold `json:"threshold"` + Threshold `json:"threshold" bson:"threshold"` } func (a UrgentLowAlert) Validate(validator structure.Validator) { @@ -118,6 +318,112 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { a.Threshold.Validate(validator) } +type EvalResult struct { + Name string + Started time.Time + Threshold float64 + NewestTime time.Time + NewestValue float64 + Evaluator func(dv, tv float64) bool `json:"-"` + OutOfRange bool +} + +func (r EvalResult) String() string { + b, err := json.Marshal(r) + if err != nil { + return "" + } + return string(b) +} + +func (r *EvalResult) Process(ctx context.Context, t Threshold, data []*Glucose) { + for _, datum := range data { + dv, tv, err := normalizeUnits(datum, t) + if err != nil { + r.lgr(ctx).WithError(err).Info("Unable to normalize datum") + continue + } + + if datum.Time == nil { + r.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") + continue + } + + outOfRange := r.Evaluator(dv, tv) + + if r.NewestValue == 0 { + r.NewestValue = dv + r.NewestTime = *datum.Time + r.OutOfRange = outOfRange + r.Threshold = 
tv + r.logGlucoseEval(ctx) + } + + if !outOfRange { + break + } + + if datum.Time != nil && (r.Started.IsZero() || datum.Time.Before(r.Started)) { + r.Started = *datum.Time + } + } +} + +// Evaluate urgent low condition. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "urgent low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, + } + er.Process(ctx, a.Threshold, data) + return er +} + +func (r EvalResult) logGlucoseEval(ctx context.Context) { + fields := log.Fields{ + "isAlerting?": r.Evaluator(r.NewestValue, r.Threshold), + "threshold": r.Threshold, + "value": r.NewestValue, + } + r.lgr(ctx).WithFields(fields).Info(r.Name) +} + +func (r EvalResult) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. + lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + } + return lgr +} + +func normalizeUnits(datum *Glucose, t Threshold) (float64, float64, error) { + if datum == nil || datum.Blood.Units == nil || datum.Blood.Value == nil { + return 0, 0, errors.Newf("Unable to evaluate datum: Units or Value is nil") + } + + // Both units are the same, no need to convert either. + if t.Units == *datum.Blood.Units { + return *datum.Blood.Value, t.Value, nil + } + + // The units don't match. There exists a known good function that converts to MmolL, so + // we'll convert whichever value isn't in MmolL to MmolL. + + if dataBloodGlucose.IsMmolL(t.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(datum.Blood.Value, datum.Blood.Units) + return *n, t.Value, nil + } else if dataBloodGlucose.IsMmolL(*datum.Blood.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(&t.Value, &t.Units) + return *datum.Blood.Value, *n, nil + } + + // This shouldn't happen. It indicates a new, third glucose unit is in use. 
+ return 0, 0, errors.New("Unable to handle unit conversion, neither is MmolL") +} + // NotLoopingAlert extends Base with a delay. type NotLoopingAlert struct { Base `bson:",inline"` @@ -130,9 +436,62 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 2*time.Hour) } -// NoCommunicationAlert extends Base with a delay. +// Evaluate if the device is looping. +func (a *NotLoopingAlert) Evaluate(ctx context.Context, decisions []*DosingDecision) EvalResult { + er := EvalResult{} + for _, decision := range decisions { + if decision.Reason == nil || *decision.Reason != DosingDecisionReasonLoop { + continue + } + if decision.Time == nil { + a.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") + continue + } + if !decision.Time.IsZero() { + er.NewestTime = *decision.Time + break + } + } + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNotLoopingDelay + } + er.OutOfRange = time.Since(er.NewestTime) > delay + logNotLoopingEvaluation(a.lgr(ctx), er.OutOfRange, time.Since(er.NewestTime), delay) + + return er +} + +// DefaultNotLoopingDelay is used when the delay has a Zero value (its default). +const DefaultNotLoopingDelay = 30 * time.Minute + +func logNotLoopingEvaluation(lgr log.Logger, isAlerting bool, since, threshold time.Duration) { + fields := log.Fields{ + "isAlerting?": isAlerting, + "value": since, + "threshold": threshold, + } + lgr.WithFields(fields).Info("not looping") +} + +const NotLoopingMessage = "Loop is not able to loop" + +// DosingDecisionReasonLoop is specified in a [DosingDecision] to indicate +// that the decision is part of a loop adjustment (as opposed to bolus or something else). +const DosingDecisionReasonLoop string = "loop" + +// NotLoopingRepeat is the interval between sending notifications when not looping. +const NotLoopingRepeat = 5 * time.Minute + +// NoCommunicationAlert is configured to send notifications when no data is received. 
+// +// It differs fundamentally from DataAlerts in that it is polled instead of being triggered +// when data is received. type NoCommunicationAlert struct { - Base `bson:",inline"` + Base `bson:",inline"` + // Delay represents the time after which a No Communication alert should be sent. + // + // A value of 0 is the default, and is treated as five minutes. Delay DurationMinutes `json:"delay,omitempty"` } @@ -142,6 +501,34 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 6*time.Hour) } +// Evaluate if the time since data was last received warrants a notification. +func (a *NoCommunicationAlert) Evaluate(ctx context.Context, lastReceived time.Time) EvalResult { + er := EvalResult{} + + if lastReceived.IsZero() { + a.lgr(ctx).Info("Unable to evaluate no communication: time is Zero") + return er + } + + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay + } + er.OutOfRange = time.Since(lastReceived) > delay + er.Started = lastReceived + er.NewestTime = lastReceived + a.lgr(ctx).WithField("isAlerting?", er.OutOfRange).Info("no communication") + + return er +} + +const ( + DefaultNoCommunicationDelay = 5 * time.Minute + MinimumNoCommunicationDelay = 5 * time.Minute +) + +const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" + // LowAlert extends Base with threshold and a delay. type LowAlert struct { Base `bson:",inline"` @@ -149,13 +536,35 @@ type LowAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. 
+ Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a LowAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 2*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 2*time.Hour) a.Threshold.Validate(validator) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) +} + +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *LowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, + } + er.Process(ctx, a.Threshold, data) + return er +} + +func genGlucoseThresholdMessage(alertType string) string { + return "Glucose reading " + alertType + " threshold" } // HighAlert extends Base with a threshold and a delay. @@ -165,13 +574,31 @@ type HighAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a HighAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) a.Threshold.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 6*time.Hour) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) +} + +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. 
+func (a *HighAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := &EvalResult{ + Name: "high", + Evaluator: func(dv, tv float64) bool { return dv > tv }, + } + er.Process(ctx, a.Threshold, data) + return *er } // DurationMinutes reads a JSON integer and converts it to a time.Duration. @@ -201,7 +628,7 @@ func (m DurationMinutes) Duration() time.Duration { return time.Duration(m) } -// ValueWithUnits binds a value to its units. +// ValueWithUnits binds a value with its units. // // Other types can extend it to parse and validate the Units. type ValueWithUnits struct { @@ -214,31 +641,59 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { - v.String("units", &t.Units).OneOf(glucose.MgdL, glucose.MmolL) - // This is a sanity check. Client software will likely further constrain these values. The - // broadness of these values allows clients to change their own min and max values - // independently, and it sidesteps rounding and conversion conflicts between the backend and - // clients. + v.String("units", &t.Units).OneOf(dataBloodGlucose.MgdL, dataBloodGlucose.MmolL) + // This is a sanity check. Client software will likely further constrain these + // values. The broadness of these values allows clients to change their own min and max + // values independently, and it sidesteps rounding and conversion conflicts between the + // backend and clients. 
var max, min float64 switch t.Units { - case glucose.MgdL, glucose.Mgdl: - max = glucose.MgdLMaximum - min = glucose.MgdLMinimum + case dataBloodGlucose.MgdL, dataBloodGlucose.Mgdl: + max = dataBloodGlucose.MgdLMaximum + min = dataBloodGlucose.MgdLMinimum v.Float64("value", &t.Value).InRange(min, max) - case glucose.MmolL, glucose.Mmoll: - max = glucose.MmolLMaximum - min = glucose.MmolLMinimum + case dataBloodGlucose.MmolL, dataBloodGlucose.Mmoll: + max = dataBloodGlucose.MmolLMaximum + min = dataBloodGlucose.MmolLMinimum v.Float64("value", &t.Value).InRange(min, max) default: v.WithReference("value").ReportError(validator.ErrorValueNotValid()) } } -// Repository abstracts persistent storage for Config data. +// Repository abstracts persistent storage in the alerts collection for Config data. type Repository interface { Get(ctx context.Context, conf *Config) (*Config, error) Upsert(ctx context.Context, conf *Config) error Delete(ctx context.Context, conf *Config) error + List(ctx context.Context, userID string) ([]*Config, error) EnsureIndexes() error } + +// Notification gathers information necessary for sending an alert notification. +type Notification struct { + // Message communicates the alert to the recipient. + Message string + RecipientUserID string + FollowedUserID string + Sent func(time.Time) +} + +// LastCommunicationsRepository encapsulates queries of the [LastCommunication] records +// collection for use with alerts. +type LastCommunicationsRepository interface { + // RecordReceivedDeviceData upserts the time of last communication from a user. + RecordReceivedDeviceData(context.Context, LastCommunication) error + // OverdueCommunications lists records for those users that haven't communicated for a + // time. + OverdueCommunications(context.Context) ([]LastCommunication, error) + + EnsureIndexes() error +} + +// DosingDecision is an alias of convenience. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose is an alias of convenience. 
+type Glucose = glucose.Glucose diff --git a/alerts/config_test.go b/alerts/config_test.go index ec479d8fb4..226bbbb847 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -3,6 +3,7 @@ package alerts import ( "bytes" "context" + "encoding/json" "fmt" "strings" "testing" @@ -11,8 +12,12 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/log" logTest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/pointer" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/test" @@ -23,9 +28,10 @@ func TestSuite(t *testing.T) { } const ( - mockUserID1 = "008c7f79-6545-4466-95fb-34e3ba728d38" - mockUserID2 = "b1880201-30d5-4190-92bb-6afcf08ca15e" - mockUploadID = "4d3b1abc280511ef9f41abf13a093b64" + mockUserID1 = "11111111-7357-7357-7357-111111111111" + mockUserID2 = "22222222-7357-7357-7357-222222222222" + mockUserID3 = "33333333-7357-7357-7357-333333333333" + mockDataSetID = "73577357735773577357735773577357" ) var _ = Describe("Config", func() { @@ -45,7 +51,6 @@ var _ = Describe("Config", func() { }, "urgentLow": { "enabled": false, - "repeat": 30, "threshold": { "units": "mg/dL", "value": 47.5 @@ -62,77 +67,358 @@ var _ = Describe("Config", func() { }, "notLooping": { "enabled": true, - "repeat": 32, "delay": 4 }, "noCommunication": { "enabled": true, - "repeat": 33, "delay": 6 } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) +}`, mockUserID1, mockUserID2, mockDataSetID) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).ToNot(HaveOccurred()) - 
Expect(conf.UserID).To(Equal(mockUserID1)) - Expect(conf.FollowedUserID).To(Equal(mockUserID2)) - Expect(conf.UploadID).To(Equal(mockUploadID)) - Expect(conf.High.Enabled).To(Equal(false)) - Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) - Expect(conf.High.Threshold.Value).To(Equal(10.0)) - Expect(conf.High.Threshold.Units).To(Equal(glucose.MmolL)) - Expect(conf.Low.Enabled).To(Equal(true)) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) - Expect(conf.Low.Threshold.Value).To(Equal(80.0)) - Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) - Expect(conf.UrgentLow.Enabled).To(Equal(false)) - Expect(conf.UrgentLow.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) - Expect(conf.NotLooping.Enabled).To(Equal(true)) - Expect(conf.NotLooping.Repeat).To(Equal(DurationMinutes(32 * time.Minute))) - Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) - Expect(conf.NoCommunication.Enabled).To(Equal(true)) - Expect(conf.NoCommunication.Repeat).To(Equal(DurationMinutes(33 * time.Minute))) - Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) + Expect(cfg.UserID).To(Equal(mockUserID1)) + Expect(cfg.FollowedUserID).To(Equal(mockUserID2)) + Expect(cfg.UploadID).To(Equal(mockDataSetID)) + Expect(cfg.Alerts.High.Enabled).To(Equal(false)) + Expect(cfg.Alerts.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) + Expect(cfg.Alerts.High.Threshold.Value).To(Equal(10.0)) + Expect(cfg.Alerts.High.Threshold.Units).To(Equal(dataBloodGlucose.MmolL)) + Expect(cfg.Alerts.Low.Enabled).To(Equal(true)) + 
Expect(cfg.Alerts.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) + Expect(cfg.Alerts.Low.Threshold.Value).To(Equal(80.0)) + Expect(cfg.Alerts.Low.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) + Expect(cfg.Alerts.UrgentLow.Enabled).To(Equal(false)) + Expect(cfg.Alerts.UrgentLow.Threshold.Value).To(Equal(47.5)) + Expect(cfg.Alerts.UrgentLow.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) + Expect(cfg.Alerts.NotLooping.Enabled).To(Equal(true)) + Expect(cfg.Alerts.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) + // Expect(conf.Alerts.NoCommunication.Enabled).To(Equal(true)) + // Expect(conf.Alerts.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) Context("validations", func() { - testConfig := func() Config { - return Config{ - UserID: mockUserID1, - FollowedUserID: mockUserID2, - UploadID: mockUploadID, - } - } - It("requires an UploadID", func() { - c := testConfig() - c.UploadID = "" + cfg := testConfig() + cfg.UploadID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an FollowedUserID", func() { - c := testConfig() - c.FollowedUserID = "" + cfg := testConfig() + cfg.FollowedUserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an UserID", func() { - c := testConfig() - c.UserID = "" + cfg := testConfig() + cfg.UserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) }) + Context("when a notification is returned", func() { + Describe("EvaluateNoCommunication", func() { + It("injects user ids", func() { + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true + + when := 
time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + n, _ := cfg.EvaluateNoCommunication(ctx, lgr, when) + + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) + }) + }) + }) + + Describe("EvaluateData", func() { + var okGlucose = []*Glucose{testInRangeDatum()} + var okDosing = []*DosingDecision{testDosingDecision(time.Second)} + + type evalTest struct { + Name string + Activity func(*Config) *AlertActivity + Glucose []*Glucose + Dosing []*DosingDecision + } + + tests := []evalTest{ + {"UrgentLow", func(c *Config) *AlertActivity { return &c.Activity.UrgentLow }, + []*Glucose{testUrgentLowDatum()}, nil}, + {"Low", func(c *Config) *AlertActivity { return &c.Activity.Low }, + []*Glucose{testLowDatum()}, nil}, + {"High", func(c *Config) *AlertActivity { return &c.Activity.High }, + []*Glucose{testHighDatum()}, nil}, + {"NotLooping", func(c *Config) *AlertActivity { return &c.Activity.NotLooping }, + nil, []*DosingDecision{testDosingDecision(-30 * time.Hour)}}, + } + for _, test := range tests { + Context(test.Name, func() { + It("is triggered", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + }) + + It("doesn't update its triggered time", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + prev := test.Activity(cfg).Triggered + n, _ = cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).To(Equal(prev)) + }) + + It("is resolved", func() { + ctx, _, cfg := newConfigTest() + 
cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeZero()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeTemporally("~", time.Now())) + }) + + It("doesn't update its resolved time", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + prev := test.Activity(cfg).Resolved + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(Equal(prev)) + }) + }) + } + + type logTest struct { + Name string + Msg string + Fields log.Fields + } + + logTests := []logTest{ + {"UrgentLow", "urgent low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 3.0}}, + {"Low", "low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 4.0}}, + {"High", "high", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 10.0}}, + {"NotLooping", "not looping", log.Fields{ + "isAlerting?": false, + // "value" is time-dependent, and would require a lot of work to mock. This + // should be close enough. 
+ "threshold": DefaultNotLoopingDelay, + }}, + } + for _, test := range logTests { + It(test.Name+" logs evaluations", func() { + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NotLooping.Base.Enabled = true + glucose := []*Glucose{testInRangeDatum()} + dosing := []*DosingDecision{testDosingDecision(-1)} + cfg.EvaluateData(ctx, glucose, dosing) + + Expect(func() { + lgr.AssertLog(log.InfoLevel, test.Msg, test.Fields) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": lgr.SerializedFields, + "expected": map[string]any{"message": test.Msg, "fields": test.Fields}, + })) + }) + } + + It("injects user IDs into the returned Notification", func() { + ctx, _, cfg := newConfigTest() + mockGlucoseData := []*Glucose{testUrgentLowDatum()} + + n, _ := cfg.EvaluateData(ctx, mockGlucoseData, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) + }) + + It("ripples the needs upsert value (from urgent low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate an urgent low notification. + n, _ := cfg.EvaluateData(ctx, []*Glucose{testUrgentLowDatum()}, nil) + Expect(n).ToNot(Equal(nil)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (from low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a low notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testLowDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. 
+ n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (form high)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a high notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testHighDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + Describe("Repeat", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.Low.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.Low.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.Low.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + testData := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, testData, nil) + Expect(n).To(BeNil()) + + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, testData, nil) + Expect(n).ToNot(BeNil()) + }) + + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.High.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.High.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + }) + + Describe("Delay", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + 
cfg.Alerts.Low.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.Low.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.High.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) + }) + + It("NotLooping is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + delay := 10 * time.Minute + lessThanDelay := delay - time.Second + cfg.Alerts.NotLooping.Delay = DurationMinutes(delay) + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + + n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := delay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n, _ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) + }) + + It("NotLooping uses its default", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + cfg.Alerts.NotLooping.Delay = 0 + lessThanDelay := DefaultNotLoopingDelay - time.Second + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + + n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := DefaultNotLoopingDelay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n, _ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) + }) + }) + }) + + It("observes NotLoopingRepeat 
between notifications", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + yesterday := []*DosingDecision{testDosingDecision(-24 * time.Hour)} + + cfg.Activity.NotLooping.Sent = time.Now() + n, _ := cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).To(BeNil()) + + cfg.Activity.NotLooping.Sent = time.Now().Add(-(1 + NotLoopingRepeat)) + n, _ = cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).ToNot(BeNil()) + }) + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { @@ -157,6 +443,114 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value -1 is not between 0 and 1000")) }) }) + + Context("Evaluate", func() { + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + + er := EvalResult{} + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(func() { + er = ul.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + + er := EvalResult{} + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + + badUnits := testInRangeDatum() + badUnits.Units = nil + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testInRangeDatum() + badValue.Value = nil + Expect(func() { + er = ul.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this still useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = testUrgentLow().Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) + + }) + }) + }) + + Context("NoCommunicationAlert", func() { + Context("Evaluate", func() { + + It("handles being passed a Zero time.Time value", func() { + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + + Expect(func() { + nc.Evaluate(ctx, time.Time{}) + }).ToNot(Panic()) + }) + + It("logs evaluation results", func() { + ctx, lgr, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + + Expect(func() { + nc.Evaluate(ctx, time.Now().Add(-12*time.Hour)) + }).ToNot(Panic()) + Expect(func() { + lgr.AssertLog(log.InfoLevel, "no communication", log.Fields{ + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + It("honors non-Zero Delay values", func() { + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + nc.Enabled = true + nc.Delay = DurationMinutes(10 * time.Minute) + + wontTrigger := time.Now().Add(-(nc.Delay.Duration() - time.Second)) + er := nc.Evaluate(ctx, wontTrigger) + Expect(er.OutOfRange).To(Equal(false)) + + willTrigger := time.Now().Add(-(nc.Delay.Duration() + time.Second)) + er = nc.Evaluate(ctx, willTrigger) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("validates the time at which data was last received", func() { + ctx, _, cfg := newConfigTest() + validLastReceived := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) + invalidLastReceived := time.Time{} + er := EvalResult{} + nc := cfg.Alerts.NoCommunication + + er = nc.Evaluate(ctx, validLastReceived) + Expect(er.OutOfRange).To(Equal(true)) + + er = nc.Evaluate(ctx, invalidLastReceived) + Expect(er.OutOfRange).To(Equal(false)) + }) + }) }) Context("LowAlert", func() { @@ -216,6 +610,57 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + 
Context("Evaluate", func() { + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low + + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + Expect(func() { + er = low.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low + + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).ToNot(Equal(false)) + + badUnits := testUrgentLowDatum() + badUnits.Units = nil + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testUrgentLowDatum() + badValue.Value = nil + Expect(func() { + er = low.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = low.Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) + }) + }) }) Context("HighAlert", func() { @@ -268,6 +713,57 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High + + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + Expect(func() { + er = high.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + + It("validates glucose data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High + + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{testHighDatum()}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + + badUnits := testInRangeDatum() + badUnits.Units = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badUnits}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + badValue := testInRangeDatum() + badValue.Value = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badValue}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this still useful? 
+ badTime := testInRangeDatum() + badTime.Time = nil + Expect(func() { + er = high.Evaluate(ctx, []*Glucose{badTime}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) + }) + }) }) Context("NoCommunicationAlert", func() { @@ -297,6 +793,7 @@ var _ = Describe("Config", func() { }) Context("NotLoopingAlert", func() { + Context("Delay", func() { It("accepts values between 0 and 2 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) @@ -319,37 +816,126 @@ var _ = Describe("Config", func() { b.Validate(val) Expect(val.Error()).To(MatchError("value 2h0m1s is not between 0s and 2h0m0s")) }) + }) + + Context("Evaluate", func() { + + It("uses a default delay of 30 minutes", func() { + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ + testDosingDecision(-29 * time.Minute), + } + decisionsWithAlert := []*DosingDecision{ + testDosingDecision(-30 * time.Minute), + } + nl := cfg.Alerts.NotLooping + + er := nl.Evaluate(ctx, decisionsNoAlert) + Expect(er.OutOfRange).To(Equal(false), er.String()) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("respects custom delays", func() { + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ + testDosingDecision(-14 * time.Minute), + } + decisionsWithAlert := []*DosingDecision{ + testDosingDecision(-15 * time.Minute), + } + nl := cfg.Alerts.NotLooping + nl.Delay = DurationMinutes(15 * time.Minute) + er := nl.Evaluate(ctx, decisionsNoAlert) + Expect(er.OutOfRange).To(Equal(false)) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("handles being passed empty data", func() { + ctx, _, cfg := newConfigTest() + er := EvalResult{} + + nl := cfg.Alerts.NotLooping + + Expect(func() { + er = nl.Evaluate(ctx, []*DosingDecision{}) + }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(true)) + Expect(func() { + er = nl.Evaluate(ctx, nil) + }).ToNot(Panic()) + 
Expect(er.OutOfRange).To(Equal(true)) + }) + + It("ignores decisions without a reason", func() { + ctx, _, cfg := newConfigTest() + nl := cfg.Alerts.NotLooping + noReason := testDosingDecision(time.Second) + noReason.Reason = nil + decisions := []*DosingDecision{ + testDosingDecision(-time.Hour), + noReason, + } + + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) + }) + + It("ignores decisions without a time", func() { + ctx, _, cfg := newConfigTest() + + nl := cfg.Alerts.NotLooping + noTime := testDosingDecision(time.Second) + noTime.Time = nil + decisions := []*DosingDecision{ + testDosingDecision(-time.Hour), + noTime, + } + + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) + }) }) }) Context("repeat", func() { + var defaultAlert = LowAlert{ + Threshold: Threshold{Value: 11, Units: dataBloodGlucose.MmolL}, + } + It("accepts values of 0 (indicating disabled)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: 0} - b.Validate(val) + l := defaultAlert + l.Repeat = 0 + l.Validate(val) Expect(val.Error()).To(Succeed()) }) It("accepts values of 15 minutes to 4 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: DurationMinutes(15 * time.Minute)} - b.Validate(val) + l := defaultAlert + l.Repeat = DurationMinutes(15 * time.Minute) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4 * time.Hour)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4 * time.Hour) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4*time.Hour + 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4*time.Hour + 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(15*time.Minute - 1)} - b.Validate(val) + l = 
defaultAlert + l.Repeat = DurationMinutes(15*time.Minute - 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) }) }) @@ -361,67 +947,163 @@ var _ = Describe("Config", func() { err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(MatchError("json is malformed")) }) - It("validates repeat minutes (negative)", func() { + }) + + Context("low", func() { + It("accepts a blank repeat", func() { buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", - "urgentLow": { - "enabled": false, - "repeat": -11, + "low": { + "enabled": true, + "delay": 10, "threshold": { - "units": "%s", - "value": 47.5 + "units": "mg/dL", + "value": 80 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) +}`, mockUserID1, mockUserID2, mockDataSetID) + conf := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, conf) + Expect(err).To(Succeed()) + Expect(conf.Alerts.Low.Repeat).To(Equal(DurationMinutes(0))) }) - It("validates repeat minutes (string)", func() { - buf := buff(`{ + }) + It("validates repeat minutes (negative)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", - "urgentLow": { + "uploadId": "%s", + "low": { "enabled": false, - "repeat": "a", + "repeat": -11, "threshold": { "units": "%s", - "value": 1 + "value": 47.5 } } -}`, mockUserID1, mockUserID2, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("json is malformed")) - }) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) }) - - Context("low", func() { - It("accepts a blank repeat", func() { - buf := 
buff(`{ + It("validates repeat minutes (string)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", "low": { - "enabled": true, - "delay": 10, + "enabled": false, + "repeat": "a", "threshold": { - "units": "mg/dL", - "value": 80 + "units": "%s", + "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) - Expect(err).To(Succeed()) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("json is malformed")) + }) +}) + +var _ = Describe("Alerts", func() { + Describe("LongestDelay", func() { + It("does what it says", func() { + low := testLowAlert() + low.Delay = DurationMinutes(10 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(10 * time.Minute)) + }) + + It("ignores disabled alerts", func() { + low := testLowAlert() + low.Delay = DurationMinutes(7 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(7 * time.Minute)) + }) + + It("returns a Zero Duration when no alerts are set", func() { + a := Alerts{ + Low: nil, + High: nil, + NotLooping: nil, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(time.Duration(0))) + }) + }) + + Describe("Evaluate", func() { + + It("detects urgent low data", func() { + ctx, _, cfg := newConfigTest() + data := 
[]*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) + }) + + It("detects low data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below low threshold")) + }) + + It("detects high data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testHighDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("above high threshold")) + }) + + Context("with both low and urgent low alerts detected", func() { + It("prefers urgent low", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) + }) }) }) }) -var _ = Describe("Duration", func() { +var _ = Describe("DurationMinutes", func() { It("parses 42", func() { d := DurationMinutes(0) err := d.UnmarshalJSON([]byte(`42`)) @@ -456,20 +1138,20 @@ var _ = Describe("Duration", func() { var _ = Describe("Threshold", func() { It("accepts mg/dL", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MgdL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MgdL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MgdL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MgdL)) }) It("accepts mmol/L", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MmolL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MmolL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) 
Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MmolL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MmolL)) }) It("rejects lb/gal", func() { buf := buff(`{"units":"%s","value":42}`, "lb/gal") @@ -482,7 +1164,7 @@ var _ = Describe("Threshold", func() { Expect(err).Should(HaveOccurred()) }) It("is case-sensitive with respect to Units", func() { - badUnits := strings.ToUpper(glucose.MmolL) + badUnits := strings.ToUpper(dataBloodGlucose.MmolL) buf := buff(`{"units":"%s","value":42}`, badUnits) err := request.DecodeObject(context.Background(), nil, buf, &Threshold{}) Expect(err).Should(HaveOccurred()) @@ -490,7 +1172,265 @@ var _ = Describe("Threshold", func() { }) +var _ = Describe("AlertActivity", func() { + Describe("IsActive()", func() { + It("is true", func() { + triggered := time.Now() + resolved := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + resolved := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeFalse()) + }) + }) + + Describe("IsSent()", func() { + It("is true", func() { + triggered := time.Now() + sent := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: sent, + } + Expect(a.IsSent()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + notified := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: notified, + } + Expect(a.IsSent()).To(BeFalse()) + }) + }) + + Describe("normalizeUnits", func() { + Context("given the same units", func() { + It("doesn't alter them at all", func() { + d := testUrgentLowDatum() + t := Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + 
Expect(dv).To(Equal(2.9)) + + d = testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) + t = Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MgdL, + } + dv, tv, err = normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in Mmol/L & threshold in mg/dL", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MmolL) + t := Threshold{ + Value: 90.0, + Units: dataBloodGlucose.MgdL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(4.99567)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in mg/dL & threshold in Mmol/L", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Value = pointer.FromAny(90.0) + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) + t := Threshold{ + Value: 5.0, + Units: dataBloodGlucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(4.99567)) + }) + }) + }) +}) + // buff is a helper for generating a JSON []byte representation. 
func buff(format string, args ...interface{}) *bytes.Buffer { return bytes.NewBufferString(fmt.Sprintf(format, args...)) } + +func testDosingDecision(d time.Duration) *DosingDecision { + return &DosingDecision{ + Base: types.Base{ + Time: pointer.FromAny(time.Now().Add(d)), + }, + Reason: pointer.FromAny(DosingDecisionReasonLoop), + } +} + +func testConfig() Config { + return Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + UploadID: mockDataSetID, + } +} + +func testUrgentLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(2.9), + }, + } +} + +func testHighDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } +} + +func testLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(3.9), + }, + } +} + +func testInRangeDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(dataBloodGlucose.MmolL), + Value: pointer.FromAny(6.0), + }, + } +} + +func testNoCommunication() *NoCommunicationAlert { + return &NoCommunicationAlert{ + Base: Base{Enabled: true}, + } +} + +func testNoCommunicationDisabled() *NoCommunicationAlert { + nc := testNoCommunication() + nc.Enabled = false + return nc +} + +func testNotLoopingDisabled() *NotLoopingAlert { + nl := testNotLooping() + nl.Enabled = false + return nl +} + +func testNotLooping() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + Delay: 0, + } +} + +func testAlertsActivity() Activity { + return Activity{} +} + +func testLowAlert() *LowAlert { + return 
&LowAlert{
+		Base: Base{Enabled: true},
+		Threshold: Threshold{
+			Value: 4,
+			Units: dataBloodGlucose.MmolL,
+		},
+	}
+}
+func testHighAlert() *HighAlert {
+	return &HighAlert{
+		Base: Base{Enabled: true},
+		Threshold: Threshold{
+			Value: 10,
+			Units: dataBloodGlucose.MmolL,
+		},
+	}
+}
+func testUrgentLowAlert() *UrgentLowAlert {
+	return &UrgentLowAlert{
+		Base: Base{Enabled: true},
+		Threshold: Threshold{
+			Value: 3,
+			Units: dataBloodGlucose.MmolL,
+		},
+	}
+}
+func testNotLoopingAlert() *NotLoopingAlert {
+	return &NotLoopingAlert{
+		Base: Base{Enabled: true},
+	}
+}
+
+func newConfigTest() (context.Context, *logTest.Logger, *Config) {
+	lgr := logTest.NewLogger()
+	ctx := log.NewContextWithLogger(context.Background(), lgr)
+	cfg := &Config{
+		UserID:         mockUserID1,
+		FollowedUserID: mockUserID2,
+		UploadID:       mockDataSetID,
+		Alerts: Alerts{
+			UrgentLow:       testUrgentLowAlert(),
+			Low:             testLowAlert(),
+			High:            testHighAlert(),
+			NotLooping:      testNotLoopingDisabled(),      // NOTE: disabled
+			NoCommunication: testNoCommunicationDisabled(), // NOTE: disabled
+		},
+		Activity: testAlertsActivity(),
+	}
+	return ctx, lgr, cfg
+}
+
+func quickJSON(v any) string {
+	b, err := json.MarshalIndent(v, "", " ")
+	if err != nil {
+		return fmt.Sprintf("<%v>", v)
+	}
+	return string(b)
+}
diff --git a/alerts/evaluator.go b/alerts/evaluator.go
new file mode 100644
index 0000000000..9608c7f243
--- /dev/null
+++ b/alerts/evaluator.go
@@ -0,0 +1,200 @@
+package alerts
+
+import (
+	"cmp"
+	"context"
+	"slices"
+	"time"
+
+	"github.com/tidepool-org/platform/auth"
+	"github.com/tidepool-org/platform/data/types/blood/glucose"
+	"github.com/tidepool-org/platform/data/types/dosingdecision"
+	"github.com/tidepool-org/platform/log"
+	"github.com/tidepool-org/platform/permission"
+)
+
+// DataRepository encapsulates queries of the data collection for use with alerts.
+type DataRepository interface {
+	// GetAlertableData queries for the data used to evaluate alerts configurations.
+ GetAlertableData(ctx context.Context, params GetAlertableDataParams) (*GetAlertableDataResponse, error) +} + +type GetAlertableDataParams struct { + // UserID of the user that owns the data. + UserID string + // UploadID of the device data set to query. + // + // The term DataSetID should be preferred, but UploadID already existed in some places. + UploadID string + // Start limits the data to those recorded after this time. + Start time.Time + // End limits the data to those recorded before this time. + End time.Time +} + +type GetAlertableDataResponse struct { + DosingDecisions []*dosingdecision.DosingDecision + Glucose []*glucose.Glucose +} + +type Evaluator struct { + Alerts Repository + Data DataRepository + Logger log.Logger + Permissions permission.Client + TokenProvider auth.ServerSessionTokenProvider +} + +func NewEvaluator(alerts Repository, dataRepo DataRepository, permissions permission.Client, + logger log.Logger, tokenProvider auth.ServerSessionTokenProvider) *Evaluator { + + return &Evaluator{ + Alerts: alerts, + Data: dataRepo, + Logger: logger, + Permissions: permissions, + TokenProvider: tokenProvider, + } +} + +// EvaluateData generates alert notifications in response to a user uploading data. 
+func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*Notification, error) { + + configs, err := e.gatherConfigs(ctx, followedUserID, dataSetID) + if err != nil { + return nil, err + } + + configsByDataSetID := e.mapConfigsByDataSetID(configs) + + notifications := []*Notification{} + for dsID, configs := range configsByDataSetID { + resp, err := e.gatherData(ctx, followedUserID, dsID, configs) + if err != nil { + return nil, err + } + for _, config := range configs { + lgr := config.LoggerWithFields(e.Logger) + notification, needsUpsert := e.genNotificationForConfig(ctx, lgr, config, resp) + if notification != nil { + notifications = append(notifications, notification) + } + if needsUpsert { + err := e.Alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } + } + } + + return notifications, nil +} + +func (e *Evaluator) genNotificationForConfig(ctx context.Context, lgr log.Logger, + config *Config, resp *GetAlertableDataResponse) (*Notification, bool) { + + notification, needsUpsert := config.EvaluateData(ctx, resp.Glucose, resp.DosingDecisions) + if notification != nil { + notification.Sent = e.wrapWithUpsert(ctx, lgr, config, notification.Sent) + } + return notification, needsUpsert +} + +func (e *Evaluator) mapConfigsByDataSetID(cfgs []*Config) map[string][]*Config { + mapped := map[string][]*Config{} + for _, cfg := range cfgs { + if _, found := mapped[cfg.UploadID]; !found { + mapped[cfg.UploadID] = []*Config{} + } + mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) + } + return mapped +} + +func (e *Evaluator) gatherConfigs(ctx context.Context, followedUserID, dataSetID string) ( + []*Config, error) { + + configs, err := e.Alerts.List(ctx, followedUserID) + if err != nil { + return nil, err + } + configs = slices.DeleteFunc(configs, e.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { + return config.UploadID != 
dataSetID + }) + return configs, nil +} + +// authDenied builds a function for slices.DeleteFunc to remove unauthorized users' Configs. +// +// This would catch the unintended case where a follower's permission was revoked, but their +// [Config] wasn't deleted. +// +// A closure is used to inject information from the evaluator into the resulting function. +func (e *Evaluator) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := e.Logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, e.TokenProvider) + perms, err := e.Permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (e *Evaluator) gatherData(ctx context.Context, followedUserID, dataSetID string, + configs []*Config) (*GetAlertableDataResponse, error) { + + if len(configs) == 0 { + return nil, nil + } + + longestDelay := slices.MaxFunc(configs, func(i, j *Config) int { + return cmp.Compare(i.LongestDelay(), j.LongestDelay()) + }).LongestDelay() + longestDelay = max(5*time.Minute, longestDelay) + params := GetAlertableDataParams{ + UserID: followedUserID, + UploadID: dataSetID, + Start: time.Now().Add(-longestDelay), + } + resp, err := e.Data.GetAlertableData(ctx, params) + if err != nil { + return nil, err + } + + resp.Glucose = slices.DeleteFunc(resp.Glucose, + func(g *glucose.Glucose) bool { return g.Time == nil }) + resp.DosingDecisions = slices.DeleteFunc(resp.DosingDecisions, + func(d *dosingdecision.DosingDecision) bool { return d.Time == nil }) + + return resp, nil +} + +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. 
+func (e *Evaluator) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { + + return func(at time.Time) { + if original != nil { + original(at) + } + if err := e.Alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } +} diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go new file mode 100644 index 0000000000..f3655ca1ea --- /dev/null +++ b/alerts/evaluator_test.go @@ -0,0 +1,437 @@ +package alerts + +import ( + "context" + "errors" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" +) + +var _ = Describe("Evaluator", func() { + Describe("EvaluateData", func() { + It("handles data for users without any followers gracefully", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + + evaluator := NewEvaluator(alertsRepo, nil, nil, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("handles data queries that return empty results (perm denied)", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + }, + }) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + 
Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("filters users without permission", func() { + // This simulates the case when permission is revoked, but the corresponding + // alerts.Config isn't yet deleted. + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: dataBloodGlucose.MmolL, + }, + }, + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: dataBloodGlucose.MmolL, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testHighDatum()}, + }, + } + perms := newMockPermissionClient() + perms.Allow(testUserID, testFollowedUserID, permission.Follow) + // This user still has a config, but has had their follow permission revoked. 
+ perms.Allow(testUserID+"-2", testFollowedUserID, permission.Read) + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + if Expect(len(notifications)).To(Equal(1)) { + Expect(notifications[0].RecipientUserID).To(Equal(testUserID)) + } + }) + + It("handles data queries that return empty results (no data)", func() { + ctx, lgr, cfg := newConfigTest() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + e := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := e.EvaluateData(ctx, mockUserID2, mockDataSetID) + + Expect(ns).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("returns notifications", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testUrgentLowDatum()}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + if Expect(notifications).To(HaveLen(1)) { + msgFound := strings.Contains(notifications[0].Message, "below urgent low") + Expect(msgFound).To(BeTrue()) + } + Expect(err).To(Succeed()) + }) + + It("queries data based on the longest delay", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + longerDelay := testHighAlert() + longerDelay.Delay = 
DurationMinutes(3) + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: testHighAlert(), + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + High: longerDelay, + }, + }, + }) + highDatum := testHighDatum() + highDatum.Blood.Base.Time = pointer.FromAny(time.Now().Add(-10 * time.Minute)) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{highDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + Expect(err).To(Succeed()) + if Expect(notifications).To(HaveLen(2)) { + msgFound := strings.Contains(notifications[0].Message, "above high") + Expect(msgFound).To(BeTrue(), notifications[0].Message) + } + }) + + It("wraps notifications so that changes are persisted when pushed", func() { + ctx, lgr, cfg := newConfigTest() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, mockUserID2, mockDataSetID) + Expect(err).To(Succeed()) + Expect(len(ns)).To(Equal(1)) + for _, n := range ns { + Expect(n.Sent).ToNot(BeNil()) + n.Sent(time.Now()) + } + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(2)) { + activity := alertsRepo.UpsertCalls[1].Activity.UrgentLow + Expect(activity.Sent).To(BeTemporally(">", startOfTest)) + 
} + }) + + It("persists changes when there's no new Notification", func() { + // For example if an alert is resolved, that change should be persisted, even + // when there isn't a notification generated. + ctx, lgr := contextWithNullLoggerDeluxe() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + }, + Activity: Activity{ + UrgentLow: AlertActivity{ + Triggered: time.Now().Add(-10 * time.Minute), + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testInRangeDatum()}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + Expect(len(ns)).To(Equal(0)) + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { + activity := alertsRepo.UpsertCalls[0].Activity.UrgentLow + Expect(activity.Resolved).To(BeTemporally(">", startOfTest)) + } + }) + + Context("when the user has multiple data sets", func() { + It("ignores Configs that don't match the data set id", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + resp1 := newTestAlertsConfig(testUserID, testDataSetID) + resp2 := newTestAlertsConfig(testUserID+"2", testDataSetID+"2") + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, + []*Config{resp1, resp2}) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + notifications, err := 
evaluator.EvaluateData(ctx,
+					testFollowedUserID, testDataSetID)
+
+				Expect(err).To(Succeed())
+				if Expect(len(notifications)).To(Equal(1)) {
+					recipientUserID := notifications[0].RecipientUserID
+					Expect(recipientUserID).To(Equal(testUserID))
+				}
+			})
+		})
+	})
+})
+
+func newTestAlertsConfig(userID, dataSetID string) *Config {
+	return &Config{
+		UserID:         userID,
+		FollowedUserID: testFollowedUserID,
+		UploadID:       dataSetID,
+		Alerts: Alerts{
+			UrgentLow: testUrgentLowAlert(),
+		},
+	}
+}
+
+type mockAlertsClient struct {
+	OverdueCommunicationsError     error
+	OverdueCommunicationsResponses [][]LastCommunication
+	ListResponses                  [][]*Config
+	ListError                      error
+	UpsertError                    error
+	UpsertCalls                    []*Config
+}
+
+func newMockAlertsClient() *mockAlertsClient {
+	return &mockAlertsClient{
+		OverdueCommunicationsResponses: [][]LastCommunication{},
+		ListResponses:                  [][]*Config{},
+		UpsertCalls:                    []*Config{},
+	}
+}
+
+func (c *mockAlertsClient) Get(ctx context.Context, conf *Config) (*Config, error) {
+	return nil, nil
+}
+
+func (c *mockAlertsClient) Upsert(ctx context.Context, conf *Config) error {
+	if conf == nil {
+		c.UpsertCalls = append(c.UpsertCalls, nil)
+	} else {
+		copyConf := *conf
+		c.UpsertCalls = append(c.UpsertCalls, &copyConf)
+	}
+	if c.UpsertError != nil {
+		return c.UpsertError
+	}
+	return nil
+}
+
+func (c *mockAlertsClient) Delete(ctx context.Context, conf *Config) error {
+	return nil
+}
+
+func (c *mockAlertsClient) List(ctx context.Context, userID string) ([]*Config, error) {
+	if c.ListError != nil {
+		return nil, c.ListError
+	}
+	if len(c.ListResponses) > 0 {
+		ret := c.ListResponses[0]
+		c.ListResponses = c.ListResponses[1:]
+		return ret, nil
+	}
+	return []*Config{}, nil
+}
+
+func (c *mockAlertsClient) OverdueCommunications(context.Context) (
+	[]LastCommunication, error) {
+
+	if c.OverdueCommunicationsError != nil {
+		return nil, c.OverdueCommunicationsError
+	}
+	if len(c.OverdueCommunicationsResponses) > 0 {
+		ret := 
c.OverdueCommunicationsResponses[0] + c.OverdueCommunicationsResponses = c.OverdueCommunicationsResponses[1:] + return ret, nil + } + return nil, nil +} + +func (c *mockAlertsClient) EnsureIndexes() error { + return nil +} + +type mockDataRepo struct { + AlertableData []*GetAlertableDataResponse +} + +func newMockDataRepo() *mockDataRepo { + return &mockDataRepo{ + AlertableData: []*GetAlertableDataResponse{}, + } +} + +func (r *mockDataRepo) GetAlertableData(ctx context.Context, params GetAlertableDataParams) ( + *GetAlertableDataResponse, error) { + + if len(r.AlertableData) > 0 { + ret := r.AlertableData[0] + r.AlertableData = r.AlertableData[1:] + return ret, nil + } + + return &GetAlertableDataResponse{ + DosingDecisions: []*dosingdecision.DosingDecision{}, + Glucose: []*glucose.Glucose{}, + }, nil +} + +type mockPermissionClient struct { + AlwaysAllow bool + Perms map[string]permission.Permissions +} + +func newMockPermissionClient() *mockPermissionClient { + return &mockPermissionClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionClient) GetUserPermissions(ctx context.Context, + requestUserID string, targetUserID string) (permission.Permissions, error) { + + if c.AlwaysAllow { + return map[string]permission.Permission{ + permission.Follow: {}, + permission.Read: {}, + }, nil + } + + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +func (c *mockPermissionClient) Allow(requestUserID, targetUserID string, perms ...string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + for _, perm := range perms { + c.Perms[key][perm] = permission.Permission{} + } +} + +func (c *mockPermissionClient) Key(requesterUserID, targetUserID string) string { + return requesterUserID + targetUserID +} diff --git a/alerts/tasks.go b/alerts/tasks.go new file mode 100644 
index 0000000000..34d77b7966 --- /dev/null +++ b/alerts/tasks.go @@ -0,0 +1,215 @@ +package alerts + +import ( + "context" + "slices" + "time" + + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" + "github.com/tidepool-org/platform/task" +) + +const CarePartnerType = "org.tidepool.carepartner" + +func NewCarePartnerTaskCreate() *task.TaskCreate { + return &task.TaskCreate{ + Name: pointer.FromAny(CarePartnerType), + Type: CarePartnerType, + AvailableTime: &time.Time{}, + Data: map[string]interface{}{}, + } +} + +type CarePartnerRunner struct { + logger log.Logger + + alerts AlertsClient + authClient auth.ServerSessionTokenProvider + deviceTokens auth.DeviceTokensClient + permissions permission.Client + pusher Pusher +} + +// AlertsClient abstracts the alerts collection for the CarePartnerRunner. +// +// One implementation is [Client]. +type AlertsClient interface { + List(_ context.Context, followedUserID string) ([]*Config, error) + Upsert(context.Context, *Config) error + // OverdueCommunications returns a slice of [LastCommunication] for users that haven't + // uploaded data recently. 
+ OverdueCommunications(context.Context) ([]LastCommunication, error) +} + +func NewCarePartnerRunner(logger log.Logger, alerts AlertsClient, + deviceTokens auth.DeviceTokensClient, pusher Pusher, permissions permission.Client, + authClient auth.ServerSessionTokenProvider) (*CarePartnerRunner, error) { + + return &CarePartnerRunner{ + logger: logger, + alerts: alerts, + authClient: authClient, + deviceTokens: deviceTokens, + pusher: pusher, + permissions: permissions, + }, nil +} + +func (r *CarePartnerRunner) GetRunnerType() string { + return CarePartnerType +} + +func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { + return 30 * time.Second +} + +func (r *CarePartnerRunner) GetRunnerDeadline() time.Time { + return time.Now().Add(r.GetRunnerDurationMaximum()) +} + +func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { + return 30 * time.Second +} + +func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { + r.logger.Info("care partner no communication check") + start := time.Now() + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, r.authClient) + if err := r.evaluateLastComms(ctx); err != nil { + r.logger.WithError(err).Warn("running care partner no communication check") + } + tsk.RepeatAvailableAfter(time.Second - time.Since(start)) +} + +func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { + overdue, err := r.alerts.OverdueCommunications(ctx) + if err != nil { + return errors.Wrap(err, "listing users without communication") + } + + for _, lastComm := range overdue { + if err := r.evaluateLastComm(ctx, lastComm); err != nil { + r.logger.WithError(err). + WithField("followedUserID", lastComm.UserID). + WithField("dataSetID", lastComm.DataSetID). 
+ Info("Unable to evaluate no communication") + continue + } + } + + return nil +} + +func (r *CarePartnerRunner) evaluateLastComm(ctx context.Context, + lastComm LastCommunication) error { + + configs, err := r.alerts.List(ctx, lastComm.UserID) + if err != nil { + return errors.Wrap(err, "listing follower alerts configs") + } + + configs = slices.DeleteFunc(configs, r.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { + return config.UploadID != lastComm.DataSetID + }) + + notifications := []*Notification{} + for _, config := range configs { + lgr := config.LoggerWithFields(r.logger) + lastData := lastComm.LastReceivedDeviceData + notification, needsUpsert := config.EvaluateNoCommunication(ctx, lgr, lastData) + if notification != nil { + notification.Sent = r.wrapWithUpsert(ctx, lgr, config, notification.Sent) + notifications = append(notifications, notification) + } + if needsUpsert { + err := r.alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } + } + + r.pushNotifications(ctx, notifications) + + return nil +} + +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. 
+func (r *CarePartnerRunner) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { + + return func(at time.Time) { + if original != nil { + original(at) + } + if err := r.alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } +} + +func (r *CarePartnerRunner) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := r.logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + perms, err := r.permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (r *CarePartnerRunner) pushNotifications(ctx context.Context, + notifications []*Notification) { + + for _, notification := range notifications { + lgr := r.logger.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := r.deviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNotification := ToPushNotification(notification) + for _, token := range tokens { + err := r.pusher.Push(ctx, token, pushNotification) + if err != nil { + lgr.WithError(err).Info("unable to push notification") + } else { + notification.Sent(time.Now()) + } + } + } +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} + +// ToPushNotification converts Notification to push.Notification. 
+func ToPushNotification(notification *Notification) *push.Notification {
+	return &push.Notification{
+		Message: notification.Message,
+	}
+}
diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go
new file mode 100644
index 0000000000..d2b683fbf1
--- /dev/null
+++ b/alerts/tasks_test.go
@@ -0,0 +1,335 @@
+package alerts
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/tidepool-org/platform/devicetokens"
+	"github.com/tidepool-org/platform/log"
+	logtest "github.com/tidepool-org/platform/log/test"
+	"github.com/tidepool-org/platform/permission"
+	"github.com/tidepool-org/platform/push"
+	"github.com/tidepool-org/platform/task"
+)
+
+var _ = Describe("CarePartnerRunner", func() {
+	Describe("Run", func() {
+		It("schedules its next run for 1 second", func() {
+			runner, test := newCarePartnerRunnerTest()
+
+			start := time.Now()
+			runner.Run(test.Ctx, test.Task)
+
+			if Expect(test.Task.AvailableTime).ToNot(BeNil()) {
+				Expect(*test.Task.AvailableTime).To(BeTemporally("~", start.Add(time.Second)))
+			}
+			Expect(test.Task.DeadlineTime).To(BeNil())
+			Expect(test.Task.State).To(Equal(task.TaskStatePending))
+		})
+
+		Context("continues after logging errors", func() {
+			It("retrieving users without communication", func() {
+				runner, test := newCarePartnerRunnerTest()
+				test.Alerts.OverdueCommunicationsError = fmt.Errorf("test error")
+
+				runner.Run(test.Ctx, test.Task)
+
+				test.Logger.AssertWarn("running care partner no communication check")
+			})
+
+			It("retrieving an alerts config", func() {
+				runner, test := newCarePartnerRunnerTest()
+				test.Alerts.ListError = fmt.Errorf("test error")
+
+				runner.Run(test.Ctx, test.Task)
+
+				Expect(func() {
+					test.Logger.AssertInfo("Unable to evaluate no communication", log.Fields{
+						"followedUserID": mockUserID2,
+					})
+				}).ToNot(Panic(), map[string]any{
+					"got": quickJSON(test.Logger.SerializedFields),
+				})
+			})
+
+			It("upserting alerts configs", func() {
+				runner, 
test := newCarePartnerRunnerTest() + test.Alerts.UpsertError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + Expect(func() { + test.Logger.AssertError("Unable to upsert changed alerts config", log.Fields{ + "userID": mockUserID1, + "followedUserID": mockUserID2, + "dataSetID": mockDataSetID, + }) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + + It("retrieving device tokens", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + Expect(func() { + test.Logger.AssertInfo("unable to retrieve device tokens", log.Fields{ + "recipientUserID": mockUserID1, + }) + }, quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + + It("pushing notifications", func() { + runner, test := newCarePartnerRunnerTest() + test.Pusher.PushErrors = append(test.Pusher.PushErrors, fmt.Errorf("test error")) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + Expect(func() { + test.Logger.AssertInfo("unable to push notification", log.Fields{ + "recipientUserID": testUserID, + }) + }, quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) + }) + }) + + It("ignores Configs that don't match the data set id", func() { + runner, test := newCarePartnerRunnerTest() + firstResp := test.Alerts.OverdueCommunicationsResponses[0] + test.Alerts.OverdueCommunicationsResponses[0] = append(firstResp, LastCommunication{ + UserID: firstResp[0].UserID, + DataSetID: "non-matching", + LastReceivedDeviceData: firstResp[0].LastReceivedDeviceData, + }) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + + It("pushes to each token", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + + runner.Run(test.Ctx, test.Task) + + 
Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("pushes to each token, continuing if any experience an error", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + test.Pusher.PushErrors = append([]error{fmt.Errorf("test error")}, test.Pusher.PushErrors...) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("ignores Configs that don't have permission", func() { + runner, test := newCarePartnerRunnerTest() + // disable permissions, no configs should be used + test.Permissions.AlwaysAllow = false + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(0)) + + // reset, add a user *with* perms, and check that it works + runner, test = newCarePartnerRunnerTest() + test.Permissions.AlwaysAllow = false + test.Permissions.Allow(mockUserID3, mockUserID2, permission.Follow, permission.Read) + cfg := *test.Config + cfg.UserID = mockUserID3 + test.Alerts.ListResponses[0] = append(test.Alerts.ListResponses[0], &cfg) + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + + It("upserts configs that need it", func() { + runner, test := newCarePartnerRunnerTest() + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, another when the notification is sent. 
+ Expect(len(test.Alerts.UpsertCalls)).To(Equal(2)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Triggered).ToNot(BeZero()) + Expect(act0.Sent).To(BeZero()) + act1 := test.Alerts.UpsertCalls[1].Activity.NoCommunication + Expect(act1.Sent).ToNot(BeZero()) + }) + + It("upserts configs that need it, even without a notification", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, no call from sent (no notification to send) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(1)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Resolved).To(BeTemporally("~", time.Now())) + }) + + It("doesn't re-mark itself resolved", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + act.Resolved = time.Now().Add(-time.Minute) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) + + It("doesn't re-send before delay", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + orig := time.Now().Add(-time.Minute) + act.Triggered = orig + act.Sent = orig + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) + }) +}) + +var _ = Describe("NewCarePartnerTaskCreate", 
func() { + It("succeeds", func() { + Expect(func() { + Expect(NewCarePartnerTaskCreate()).ToNot(Equal(nil)) + }).ToNot(Panic()) + }) +}) + +type carePartnerRunnerTest struct { + Alerts *mockAlertsClient + Config *Config + Ctx context.Context + Logger *logtest.Logger + Permissions *mockPermissionClient + Pusher *mockPusher + Task *task.Task + Tokens *mockDeviceTokensClient +} + +func newCarePartnerRunnerTest() (*CarePartnerRunner, *carePartnerRunnerTest) { + alerts := newMockAlertsClient() + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true + pusher := newMockPusher() + tsk := &task.Task{} + tokens := newMockDeviceTokensClient() + perms := newMockPermissionClient() + perms.AlwaysAllow = true + authClient := newMockAuthTokenProvider() + + runner, err := NewCarePartnerRunner(lgr, alerts, tokens, pusher, perms, authClient) + Expect(err).To(Succeed()) + + last := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + alerts.OverdueCommunicationsResponses = [][]LastCommunication{{ + { + UserID: mockUserID2, + DataSetID: mockDataSetID, + LastReceivedDeviceData: last, + }, + }} + alerts.ListResponses = [][]*Config{{cfg}} + tokens.GetResponses = [][]*devicetokens.DeviceToken{ + { + {Apple: &devicetokens.AppleDeviceToken{}}, + }, + } + + return runner, &carePartnerRunnerTest{ + Alerts: alerts, + Config: cfg, + Ctx: ctx, + Logger: lgr, + Permissions: perms, + Pusher: pusher, + Task: tsk, + Tokens: tokens, + } +} + +type mockDeviceTokensClient struct { + GetError error + GetResponses [][]*devicetokens.DeviceToken +} + +func newMockDeviceTokensClient() *mockDeviceTokensClient { + return &mockDeviceTokensClient{ + GetResponses: [][]*devicetokens.DeviceToken{}, + } +} + +func (c *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if c.GetError != nil { + return nil, c.GetError + } + if len(c.GetResponses) > 0 { + ret := c.GetResponses[0] + c.GetResponses = c.GetResponses[1:] + 
return ret, nil + } + return nil, nil +} + +type mockPusher struct { + PushCalls []pushCall + PushErrors []error +} + +type pushCall struct { + Token *devicetokens.DeviceToken + Notification *push.Notification +} + +func newMockPusher() *mockPusher { + return &mockPusher{} +} + +func (p *mockPusher) Push(_ context.Context, + token *devicetokens.DeviceToken, notification *push.Notification) error { + + p.PushCalls = append(p.PushCalls, pushCall{token, notification}) + if len(p.PushErrors) > 0 { + err := p.PushErrors[0] + p.PushErrors = p.PushErrors[1:] + return err + } + return nil +} + +type mockAuthTokenProvider struct{} + +func newMockAuthTokenProvider() *mockAuthTokenProvider { + return &mockAuthTokenProvider{} +} + +func (p *mockAuthTokenProvider) ServerSessionToken() (string, error) { + return "", nil +} diff --git a/auth/auth.go b/auth/auth.go index e728beac54..976dbd1f43 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -21,6 +22,7 @@ type Client interface { RestrictedTokenAccessor ExternalAccessor permission.Client + DeviceTokensClient } type ExternalAccessor interface { @@ -51,3 +53,9 @@ func ServerSessionTokenProviderFromContext(ctx context.Context) ServerSessionTok type contextKey string const serverSessionTokenProviderContextKey contextKey = "serverSessionTokenProvider" + +// DeviceTokensClient provides access to the tokens used to authenticate +// mobile device push notifications. 
+type DeviceTokensClient interface { + GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) +} diff --git a/auth/client/client.go b/auth/client/client.go index a4c8511a27..d29f6561ba 100644 --- a/auth/client/client.go +++ b/auth/client/client.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -308,6 +309,18 @@ func (c *Client) DeleteRestrictedToken(ctx context.Context, id string) error { return c.client.RequestData(ctx, http.MethodDelete, url, nil, nil, nil) } +// GetDeviceTokens belonging to a given user. +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + ctx = log.NewContextWithLogger(ctx, c.logger) + url := c.client.ConstructURL("v1", "users", userID, "device_tokens") + tokens := []*devicetokens.DeviceToken{} + err := c.client.RequestData(ctx, http.MethodGet, url, nil, nil, &tokens) + if err != nil { + return nil, errors.Wrap(err, "Unable to request device token data") + } + return tokens, nil +} + type ConfigLoader interface { Load(*Config) error } diff --git a/auth/client/client_test.go b/auth/client/client_test.go index fbd9a6be14..23db62b316 100644 --- a/auth/client/client_test.go +++ b/auth/client/client_test.go @@ -2,6 +2,7 @@ package client_test import ( "context" + "encoding/json" "net/http" "time" @@ -14,6 +15,7 @@ import ( "github.com/tidepool-org/platform/auth" authClient "github.com/tidepool-org/platform/auth/client" authTest "github.com/tidepool-org/platform/auth/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" errorsTest "github.com/tidepool-org/platform/errors/test" "github.com/tidepool-org/platform/log" @@ -472,6 +474,64 @@ var _ = Describe("Client", func() { }) }) }) + + Describe("GetDeviceTokens", func() 
{ + var testUserID = "test-user-id" + var testUserIDBadResponse = "test-user-id-bad-response" + var testTokens = map[string]any{ + testUserID: []*devicetokens.DeviceToken{{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("blah"), + Environment: "sandbox", + }, + }}, + testUserIDBadResponse: []map[string]any{ + { + "Apple": "", + }, + }, + } + + It("returns a token", func() { + body, err := json.Marshal(testTokens[testUserID]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + tokens, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect([]byte(tokens[0].Apple.Token)).To(Equal([]byte("blah"))) + Expect(tokens[0].Apple.Environment).To(Equal("sandbox")) + }) + + It("returns an error when receiving malformed responses", func() { + body, err := json.Marshal(testTokens[testUserIDBadResponse]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserIDBadResponse+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + _, err = client.GetDeviceTokens(ctx, testUserIDBadResponse) + Expect(err).To(HaveOccurred()) + }) + + It("returns an error on non-200 responses", func() { + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusBadRequest, nil)), + ) + _, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("Unable to request device token data"))) + }) + }) }) }) }) diff --git a/auth/service/api/v1/auth_service_mock.go b/auth/service/api/v1/auth_service_mock.go index 00dadd0dbf..99bc534bfa 100644 --- a/auth/service/api/v1/auth_service_mock.go +++ b/auth/service/api/v1/auth_service_mock.go @@ -10,7 +10,6 @@ import ( gomock "github.com/golang/mock/gomock" api 
"github.com/tidepool-org/hydrophone/client" - apple "github.com/tidepool-org/platform/apple" appvalidate "github.com/tidepool-org/platform/appvalidate" auth "github.com/tidepool-org/platform/auth" diff --git a/auth/service/api/v1/devicetokens.go b/auth/service/api/v1/devicetokens.go index c19c654343..99d6b2ede1 100644 --- a/auth/service/api/v1/devicetokens.go +++ b/auth/service/api/v1/devicetokens.go @@ -13,6 +13,7 @@ import ( func (r *Router) DeviceTokensRoutes() []*rest.Route { return []*rest.Route{ rest.Post("/v1/users/:userId/device_tokens", api.RequireUser(r.UpsertDeviceToken)), + rest.Get("/v1/users/:userId/device_tokens", api.RequireAuth(r.GetDeviceTokens)), } } @@ -39,3 +40,27 @@ func (r *Router) UpsertDeviceToken(res rest.ResponseWriter, req *rest.Request) { return } } + +func (r *Router) GetDeviceTokens(res rest.ResponseWriter, req *rest.Request) { + responder := request.MustNewResponder(res, req) + ctx := req.Request.Context() + authDetails := request.GetAuthDetails(ctx) + repo := r.AuthStore().NewDeviceTokenRepository() + userID := req.PathParam("userId") + + if userID != authDetails.UserID() && !authDetails.IsService() { + responder.Error(http.StatusForbidden, request.ErrorUnauthorized()) + return + } + + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + responder.Error(http.StatusInternalServerError, err) + return + } + tokens := make([]devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, doc.DeviceToken) + } + responder.Data(http.StatusOK, tokens) +} diff --git a/auth/service/api/v1/devicetokens_test.go b/auth/service/api/v1/devicetokens_test.go index 1033b7cc9c..223208b617 100644 --- a/auth/service/api/v1/devicetokens_test.go +++ b/auth/service/api/v1/devicetokens_test.go @@ -3,6 +3,7 @@ package v1 import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -12,14 +13,18 @@ import ( . 
"github.com/onsi/gomega" serviceTest "github.com/tidepool-org/platform/auth/service/test" + storetest "github.com/tidepool-org/platform/auth/store/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/service/test" ) var _ = Describe("Device tokens endpoints", func() { var rtr *Router + var svc *serviceTest.Service + BeforeEach(func() { - svc := serviceTest.NewService() + svc = serviceTest.NewService() var err error rtr, err = NewRouter(svc) Expect(err).ToNot(HaveOccurred()) @@ -66,6 +71,64 @@ var _ = Describe("Device tokens endpoints", func() { }) + Describe("List", func() { + It("succeeds with valid input", func() { + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + }) + + It("rejects non-service users", func() { + svcDetails := test.NewMockAuthDetails(request.MethodAccessToken, "test-user", test.TestToken2) + req := newDeviceTokensTestRequest(svcDetails, nil, "") + res := test.NewMockRestResponseWriter() + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusForbidden)) + }) + + It("may return multiple documents", func() { + repo := &storetest.DeviceTokenRepository{ + Tokens: map[string][]*devicetokens.DeviceToken{ + test.TestUserID1: { + &devicetokens.DeviceToken{}, + &devicetokens.DeviceToken{}, + }, + }, + } + + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + got := []*devicetokens.DeviceToken{} + err := json.Unmarshal(res.Body.Bytes(), &got) + Expect(err).To(Succeed()) + Expect(got).To(HaveLen(2)) + }) + + It("handles repository errors", func() { + repo := &storetest.DeviceTokenRepository{ + Error: fmt.Errorf("test error"), + 
} + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusInternalServerError)) + }) + }) }) func buff(template string, args ...any) *bytes.Buffer { @@ -91,5 +154,4 @@ func newDeviceTokensTestRequest(auth request.AuthDetails, body io.Reader, userID Request: httpReq, PathParams: map[string]string{"userId": userIDFromPath}, } - } diff --git a/auth/service/service/client.go b/auth/service/service/client.go index 4335eae5db..71d27ad2dd 100644 --- a/auth/service/service/client.go +++ b/auth/service/service/client.go @@ -6,6 +6,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/auth/client" authStore "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -102,6 +103,19 @@ func (c *Client) DeleteAllProviderSessions(ctx context.Context, userID string) e return repository.DeleteAllProviderSessions(ctx, userID) } +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + repo := c.authStore.NewDeviceTokenRepository() + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + return nil, err + } + tokens := make([]*devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, &doc.DeviceToken) + } + return tokens, nil +} + func (c *Client) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { repository := c.authStore.NewProviderSessionRepository() return repository.GetProviderSession(ctx, id) diff --git a/auth/service/service/client_test.go b/auth/service/service/client_test.go index 9a8a94e85d..26792ca30e 100644 --- a/auth/service/service/client_test.go 
+++ b/auth/service/service/client_test.go @@ -1,8 +1,123 @@ package service_test import ( + "context" + "fmt" + "time" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/ghttp" + + "github.com/tidepool-org/platform/appvalidate" + "github.com/tidepool-org/platform/auth/client" + "github.com/tidepool-org/platform/auth/service/service" + "github.com/tidepool-org/platform/auth/store" + storetest "github.com/tidepool-org/platform/auth/store/test" + platformclient "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/devicetokens" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/provider" ) var _ = Describe("Client", func() { + var testUserID = "test-user-id" + var testDeviceToken1 = &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("test"), + Environment: "sandbox", + }, + } + + newTestServiceClient := func(url string, authStore store.Store) *service.Client { + var err error + extCfg := &client.ExternalConfig{ + Config: &platform.Config{ + Config: &platformclient.Config{ + Address: url, + UserAgent: "test", + }, + ServiceSecret: "", + }, + ServerSessionTokenSecret: "test token", + ServerSessionTokenTimeout: time.Minute, + } + authAs := platform.AuthorizeAsService + name := "test auth client" + logger := logtest.NewLogger() + if authStore == nil { + repo := storetest.NewDeviceTokenRepository() + repo.Tokens = map[string][]*devicetokens.DeviceToken{ + testUserID: { + testDeviceToken1, + }} + + authStore = &mockAuthStore{ + DeviceTokenRepository: repo, + } + } + providerFactory := &mockProviderFactory{} + serviceClient, err := service.NewClient(extCfg, authAs, name, logger, authStore, providerFactory) + Expect(err).To(Succeed()) + return serviceClient + } + + Describe("GetDeviceTokens", func() { + It("returns a slice of tokens", func() { + ctx := context.Background() + server := NewServer() 
+ defer server.Close() + serviceClient := newTestServiceClient(server.URL(), nil) + + tokens, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect(tokens[0]).To(Equal(testDeviceToken1)) + }) + + It("handles errors from the underlying repo", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + repo := storetest.NewDeviceTokenRepository() + repo.Error = fmt.Errorf("test error") + authStore := &mockAuthStore{ + DeviceTokenRepository: repo, + } + serviceClient := newTestServiceClient(server.URL(), authStore) + + _, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(HaveOccurred()) + }) + }) }) + +type mockAuthStore struct { + store.DeviceTokenRepository +} + +func (s *mockAuthStore) NewAppValidateRepository() appvalidate.Repository { + return nil +} + +func (s *mockAuthStore) NewProviderSessionRepository() store.ProviderSessionRepository { + return nil +} + +func (s *mockAuthStore) NewRestrictedTokenRepository() store.RestrictedTokenRepository { + return nil +} + +func (s *mockAuthStore) NewDeviceTokenRepository() store.DeviceTokenRepository { + return s.DeviceTokenRepository +} + +type mockProviderFactory struct{} + +func (f *mockProviderFactory) Get(typ string, name string) (provider.Provider, error) { + return nil, nil +} diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index 4a257ca9f0..d2bfad7a41 100644 --- a/auth/store/mongo/device_tokens_repository.go +++ b/auth/store/mongo/device_tokens_repository.go @@ -16,6 +16,20 @@ import ( // MongoDB collection. 
type deviceTokenRepo structuredmongo.Repository +func (r *deviceTokenRepo) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + f := bson.M{"userId": userID} + cursor, err := r.Find(ctx, f, nil) + if err != nil { + return nil, err + } + defer cursor.Close(ctx) + var docs []*devicetokens.Document + if err := cursor.All(ctx, &docs); err != nil { + return nil, err + } + return docs, nil +} + // Upsert will create or update the given Config. func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document) error { // The presence of UserID and TokenID should be enforced with a mongodb @@ -24,7 +38,7 @@ func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document return errors.New("UserID is empty") } if doc.TokenKey == "" { - return errors.New("TokenID is empty") + return errors.New("TokenKey is empty") } opts := options.Update().SetUpsert(true) diff --git a/auth/store/mongo/device_tokens_repository_test.go b/auth/store/mongo/device_tokens_repository_test.go new file mode 100644 index 0000000000..6eb45b2221 --- /dev/null +++ b/auth/store/mongo/device_tokens_repository_test.go @@ -0,0 +1,76 @@ +package mongo + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/store/structured/mongo" + storeStructuredMongoTest "github.com/tidepool-org/platform/store/structured/mongo/test" +) + +const testUserID = "857ec1d7-8777-4877-a308-96a23c066524" + +var _ = Describe("deviceTokenRepo", Label("mongodb", "slow", "integration"), func() { + It("retrieves all for the given user id", func() { + test := newDeviceTokensRepoTest() + + docs, err := test.Repo.GetAllByUserID(test.Ctx, testUserID) + Expect(err).To(Succeed()) + + if Expect(docs).To(HaveLen(2)) { + for _, doc := range docs { + Expect(doc.UserID).To(Equal(testUserID)) + } + } + }) + + It("ensures indexes", func() { + test := newDeviceTokensRepoTest() + Expect(test.Repo.EnsureIndexes()).To(Succeed()) + }) +}) + +type deviceTokensRepoTest struct { + Ctx context.Context + Repo store.DeviceTokenRepository + Config *mongo.Config + Store *Store +} + +func newDeviceTokensRepoTest() *deviceTokensRepoTest { + test := &deviceTokensRepoTest{ + Ctx: context.Background(), + Config: storeStructuredMongoTest.NewConfig(), + } + store, err := NewStore(test.Config) + Expect(err).To(Succeed()) + test.Store = store + test.Repo = store.NewDeviceTokenRepository() + + testDocs := []*devicetokens.Document{ + { + UserID: testUserID, + TokenKey: "a", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: testUserID, + TokenKey: "b", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: "not" + testUserID, + TokenKey: "c", + DeviceToken: devicetokens.DeviceToken{}, + }, + } + for _, testDoc := range testDocs { + Expect(test.Repo.Upsert(test.Ctx, testDoc)).To(Succeed()) + } + + return test +} diff --git a/auth/store/mongo/store_test.go b/auth/store/mongo/store_test.go index d12b0fba0f..34eec13f1b 100644 --- a/auth/store/mongo/store_test.go +++ b/auth/store/mongo/store_test.go @@ -133,7 +133,7 @@ var _ = Describe("Store", func() { 
doc.UserID = "user-id" doc.TokenKey = "" err = repository.Upsert(ctx, doc) - Expect(err).To(MatchError("TokenID is empty")) + Expect(err).To(MatchError("TokenKey is empty")) }) It("updates the existing document, instead of creating a duplicate", func() { diff --git a/auth/store/test/device_token_repository.go b/auth/store/test/device_token_repository.go index 4847596895..a15f913af1 100644 --- a/auth/store/test/device_token_repository.go +++ b/auth/store/test/device_token_repository.go @@ -9,6 +9,9 @@ import ( type DeviceTokenRepository struct { *authTest.DeviceTokenAccessor + Documents []*devicetokens.Document + Tokens map[string][]*devicetokens.DeviceToken + Error error } func NewDeviceTokenRepository() *DeviceTokenRepository { @@ -21,6 +24,20 @@ func (r *DeviceTokenRepository) Expectations() { r.DeviceTokenAccessor.Expectations() } +func (r *DeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + if tokens, ok := r.Tokens[userID]; ok { + docs := make([]*devicetokens.Document, 0, len(tokens)) + for _, token := range tokens { + docs = append(docs, &devicetokens.Document{DeviceToken: *token}) + } + return docs, nil + } + return nil, nil +} + func (r *DeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { return nil } diff --git a/auth/test/client.go b/auth/test/client.go index e500f69d34..9fba8f4e5c 100644 --- a/auth/test/client.go +++ b/auth/test/client.go @@ -4,6 +4,7 @@ type Client struct { *ProviderSessionAccessor *RestrictedTokenAccessor *ExternalAccessor + *DeviceTokensClient } func NewClient() *Client { @@ -11,6 +12,7 @@ func NewClient() *Client { ProviderSessionAccessor: NewProviderSessionAccessor(), RestrictedTokenAccessor: NewRestrictedTokenAccessor(), ExternalAccessor: NewExternalAccessor(), + DeviceTokensClient: NewDeviceTokensClient(), } } diff --git a/auth/test/external_accessor.go b/auth/test/external_accessor.go 
index 1916c1cf28..a7872e4c34 100644 --- a/auth/test/external_accessor.go +++ b/auth/test/external_accessor.go @@ -3,6 +3,7 @@ package test import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -179,3 +180,11 @@ func (e *ExternalAccessor) GetUserPermissions(ctx context.Context, requestUserID } panic("GetUserPermissions no output") } + +func NewDeviceTokensClient() *DeviceTokensClient { return &DeviceTokensClient{} } + +type DeviceTokensClient struct{} + +func (c *DeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + return nil, nil +} diff --git a/auth/test/mock.go b/auth/test/mock.go index 0146c6bb9d..878d3a4544 100644 --- a/auth/test/mock.go +++ b/auth/test/mock.go @@ -9,8 +9,8 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" + devicetokens "github.com/tidepool-org/platform/devicetokens" page "github.com/tidepool-org/platform/page" permission "github.com/tidepool-org/platform/permission" request "github.com/tidepool-org/platform/request" @@ -168,6 +168,21 @@ func (mr *MockClientMockRecorder) EnsureAuthorizedUser(ctx, targetUserID, permis return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureAuthorizedUser", reflect.TypeOf((*MockClient)(nil).EnsureAuthorizedUser), ctx, targetUserID, permission) } +// GetDeviceTokens mocks base method. +func (m *MockClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. 
+func (mr *MockClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockClient)(nil).GetDeviceTokens), ctx, userID) +} + // GetProviderSession mocks base method. func (m *MockClient) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { m.ctrl.T.Helper() @@ -436,3 +451,41 @@ func (mr *MockServerSessionTokenProviderMockRecorder) ServerSessionToken() *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerSessionToken", reflect.TypeOf((*MockServerSessionTokenProvider)(nil).ServerSessionToken)) } + +// MockDeviceTokensClient is a mock of DeviceTokensClient interface. +type MockDeviceTokensClient struct { + ctrl *gomock.Controller + recorder *MockDeviceTokensClientMockRecorder +} + +// MockDeviceTokensClientMockRecorder is the mock recorder for MockDeviceTokensClient. +type MockDeviceTokensClientMockRecorder struct { + mock *MockDeviceTokensClient +} + +// NewMockDeviceTokensClient creates a new mock instance. +func NewMockDeviceTokensClient(ctrl *gomock.Controller) *MockDeviceTokensClient { + mock := &MockDeviceTokensClient{ctrl: ctrl} + mock.recorder = &MockDeviceTokensClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeviceTokensClient) EXPECT() *MockDeviceTokensClientMockRecorder { + return m.recorder +} + +// GetDeviceTokens mocks base method. +func (m *MockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. 
+func (mr *MockDeviceTokensClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockDeviceTokensClient)(nil).GetDeviceTokens), ctx, userID) +} diff --git a/data/blood/glucose/glucose.go b/data/blood/glucose/glucose.go index 32ca889dd9..ac1d7717a9 100644 --- a/data/blood/glucose/glucose.go +++ b/data/blood/glucose/glucose.go @@ -67,3 +67,7 @@ func NormalizeValueForUnits(value *float64, units *string) *float64 { } return value } + +func IsMmolL(units string) bool { + return units == MmolL || units == Mmoll +} diff --git a/data/events/alerts.go b/data/events/alerts.go new file mode 100644 index 0000000000..4898da12e7 --- /dev/null +++ b/data/events/alerts.go @@ -0,0 +1,233 @@ +package events + +import ( + "context" + "os" + "strings" + "time" + + "github.com/IBM/sarama" + "go.mongodb.org/mongo-driver/bson" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" +) + +type Consumer struct { + Alerts AlertsClient + Data alerts.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + LastCommunications LastCommunicationsRecorder + TokensProvider auth.ServerSessionTokenProvider + + Logger log.Logger +} + +// DosingDecision removes a stutter to improve readability. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose removes a stutter to improve readability. 
+type Glucose = glucose.Glucose + +func (c *Consumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + if msg == nil { + c.logger(ctx).Info("UNEXPECTED: nil message; ignoring") + return nil + } + + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, c.TokensProvider) + + switch { + case strings.Contains(msg.Topic, ".data.alerts"): + return c.consumeAlertsConfigs(ctx, session, msg) + case strings.Contains(msg.Topic, ".data.deviceData.alerts"): + return c.consumeDeviceData(ctx, session, msg) + default: + c.logger(ctx).WithField("topic", msg.Topic). + Infof("UNEXPECTED: topic; ignoring") + } + + return nil +} + +func (c *Consumer) consumeAlertsConfigs(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + + cfg := &alerts.Config{} + updatedFields, err := unmarshalMessageValue(msg.Value, cfg) + if err != nil { + return err + } + lgr := c.logger(ctx) + if isActivityAndActivityOnly(updatedFields) { + lgr.WithField("updatedFields", updatedFields). 
+			Debug("alerts config is an activity update, will skip")
+		return nil
+	}
+
+	lgr.WithField("cfg", cfg).Info("consuming an alerts config message")
+
+	ctxLog := cfg.LoggerWithFields(c.logger(ctx))
+	ctx = log.NewContextWithLogger(ctx, ctxLog)
+
+	notes, err := c.Evaluator.EvaluateData(ctx, cfg.FollowedUserID, cfg.UploadID)
+	if err != nil {
+		format := "Unable to evaluate alerts configs triggered event for user %s"
+		return errors.Wrapf(err, format, cfg.UserID)
+	}
+	ctxLog.WithField("notes", notes).Debug("notes generated from alerts config")
+
+	c.pushNotifications(ctx, notes)
+
+	session.MarkMessage(msg, "")
+	lgr.WithField("message", msg).Debug("marked")
+	return nil
+}
+
+func isActivityAndActivityOnly(updatedFields []string) bool {
+	hasActivity := false
+	for _, field := range updatedFields {
+		if field == "activity" {
+			hasActivity = true
+		} else {
+			return false
+		}
+	}
+	return hasActivity
+}
+
+func (c *Consumer) consumeDeviceData(ctx context.Context,
+	session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error {
+
+	datum := &Glucose{}
+	if _, err := unmarshalMessageValue(msg.Value, datum); err != nil {
+		return err
+	}
+	lgr := c.logger(ctx)
+	lgr.WithField("data", datum).Info("consuming a device data message")
+
+	if datum.UserID == nil {
+		return errors.New("Unable to retrieve alerts configs: userID is nil")
+	}
+	if datum.UploadID == nil {
+		return errors.New("Unable to retrieve alerts configs: DataSetID is nil")
+	}
+	ctx = log.NewContextWithLogger(ctx, lgr.WithField("followedUserID", *datum.UserID))
+	lastComm := alerts.LastCommunication{
+		UserID:                 *datum.UserID,
+		LastReceivedDeviceData: time.Now(),
+		DataSetID:              *datum.UploadID,
+	}
+	err := c.LastCommunications.RecordReceivedDeviceData(ctx, lastComm)
+	if err != nil {
+		lgr.WithError(err).Info("Unable to record device data received")
+	}
+	notes, err := c.Evaluator.EvaluateData(ctx, *datum.UserID, *datum.UploadID)
+	if err != nil {
+		format := "Unable to evaluate device data 
triggered event for user %s" + return errors.Wrapf(err, format, *datum.UserID) + } + for idx, note := range notes { + lgr.WithField("idx", idx).WithField("note", note).Debug("notes") + } + + c.pushNotifications(ctx, notes) + + session.MarkMessage(msg, "") + lgr.WithField("message", msg).Debug("marked") + return nil +} + +func (c *Consumer) pushNotifications(ctx context.Context, notifications []*alerts.Notification) { + lgr := c.logger(ctx) + + // Notes could be pushed into a Kafka topic to have a more durable retry, + // but that can be added later. + for _, notification := range notifications { + lgr := lgr.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("Unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNote := alerts.ToPushNotification(notification) + for _, token := range tokens { + err := c.Pusher.Push(ctx, token, pushNote) + if err != nil { + lgr.WithError(err).Info("Unable to push notification") + } else { + notification.Sent(time.Now()) + } + } + } +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (c *Consumer) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. + if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + if c.Logger != nil { + return c.Logger + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +type AlertsEvaluator interface { + // EvaluateData to check if notifications should be sent in response to new data. 
+ EvaluateData(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.Notification, error) +} + +func unmarshalMessageValue[A any](b []byte, payload *A) ([]string, error) { + wrapper := &struct { + FullDocument A `json:"fullDocument"` + UpdateDescription struct { + UpdatedFields map[string]any `json:"updatedFields"` + } `json:"updateDescription"` + }{} + if err := bson.UnmarshalExtJSON(b, false, wrapper); err != nil { + return nil, errors.Wrap(err, "Unable to unmarshal ExtJSON") + } + *payload = wrapper.FullDocument + fields := []string{} + for k := range wrapper.UpdateDescription.UpdatedFields { + fields = append(fields, k) + } + return fields, nil +} + +type AlertsClient interface { + Delete(context.Context, *alerts.Config) error + Get(context.Context, *alerts.Config) (*alerts.Config, error) + List(_ context.Context, userID string) ([]*alerts.Config, error) + Upsert(context.Context, *alerts.Config) error +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go new file mode 100644 index 0000000000..4ebcb80f25 --- /dev/null +++ b/data/events/alerts_test.go @@ -0,0 +1,536 @@ +package events + +import ( + "context" + "sync" + "time" + + "github.com/IBM/sarama" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + + "github.com/tidepool-org/platform/alerts" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" + storetest "github.com/tidepool-org/platform/data/store/test" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" +) + +const ( + testUserID = "test-user-id" + testFollowedUserID = "test-followed-user-id" + testUserNoPermsID = "test-user-no-perms" + testDataSetID = "test-data-set-id" +) + +var _ = Describe("Consumer", func() { + Describe("Consume", func() { + It("ignores nil messages", func() { + ctx, _ := addLogger(context.Background()) + c := &Consumer{} + + Expect(c.Consume(ctx, nil, nil)).To(Succeed()) + }) + + It("consumes alerts config events", func() { + cfg := &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + Units: "mg/dL", + }, + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.alerts", cfg) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("records device data events", func() { + blood := newTestStaticDatumMmolL(7.2) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + 
Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.LastCommunications.NumCallsFor(testFollowedUserID)).To(Equal(1)) + }) + + It("consumes device data events", func() { + blood := newTestStaticDatumMmolL(7.2) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("errors out when the datum's UserID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UserID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). + To(MatchError(ContainSubstring("userID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("errors out when the datum's UploadID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UploadID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). 
+ To(MatchError(ContainSubstring("DataSetID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("pushes notifications", func() { + blood := newTestStaticDatumMmolL(1.0) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + eval := newMockEvaluator() + eval.Evaluations[testFollowedUserID+testDataSetID] = []mockEvaluatorResponse{ + { + Notifications: []*alerts.Notification{ + { + Message: "something", + RecipientUserID: testUserID, + FollowedUserID: testFollowedUserID, + Sent: func(time.Time) {}, + }, + }, + }, + } + c.Evaluator = eval + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + }) + }) + + Describe("LastCommunicationsReporter", func() { + Describe("RecordReceivedDeviceData", func() { + It("records the metadata for the user id", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) + mockRepo := newMockLastCommunicationsRepository() + rec := NewLastCommunicationRecorder(mockRepo) + lastComm := alerts.LastCommunication{ + UserID: testFollowedUserID, + LastReceivedDeviceData: time.Now(), + DataSetID: "test", + } + err := rec.RecordReceivedDeviceData(ctx, lastComm) + Expect(err).To(Succeed()) + Expect(mockRepo.NumCallsFor(testFollowedUserID)).To(Equal(1)) + }) + }) + }) +}) + +type consumerTestDeps struct { + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + DeviceTokens *mockDeviceTokens + Evaluator *mockStaticEvaluator + Logger *logtest.Logger + Permissions *mockPermissionsClient + Pusher Pusher + LastCommunications *mockLastCommunicationsRecorder + Repo *storetest.DataRepository + Session *mockConsumerGroupSession +} + +func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { + GinkgoHelper() + ctx, logger := addLogger(context.Background()) + alertsClient := newMockAlertsConfigClient([]*alerts.Config{ + { + 
UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{}, + }, + }, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + augmentedDocs := augmentMockMongoDocs(docs) + cur := newMockMongoCursor(augmentedDocs) + dataRepo.GetDataRangeOutputs = []storetest.GetDataRangeOutput{ + {Error: nil, Cursor: cur}, + } + permissions := newMockPermissionsClient() + evaluator := newMockStaticEvaluator() + pusher := push.NewLogPusher(logger) + deviceTokens := newMockDeviceTokens() + lastCommunications := newMockLastCommunicationsRecorder() + + return &Consumer{ + Alerts: alertsClient, + Evaluator: evaluator, + Data: dataRepo, + DeviceTokens: deviceTokens, + Permissions: permissions, + Pusher: pusher, + LastCommunications: lastCommunications, + }, &consumerTestDeps{ + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + DeviceTokens: deviceTokens, + Evaluator: evaluator, + Pusher: pusher, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + LastCommunications: lastCommunications, + Permissions: permissions, + } +} + +// mockEvaluator implements Evaluator. 
+type mockEvaluator struct {
+	Evaluations   map[string][]mockEvaluatorResponse
+	EvaluateCalls map[string]int
+}
+
+type mockEvaluatorResponse struct {
+	Notifications []*alerts.Notification
+	Error         error
+}
+
+func newMockEvaluator() *mockEvaluator {
+	return &mockEvaluator{
+		Evaluations:   map[string][]mockEvaluatorResponse{},
+		EvaluateCalls: map[string]int{},
+	}
+}
+
+func (e *mockEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) (
+	[]*alerts.Notification, error) {
+
+	key := followedUserID + dataSetID
+	if _, found := e.Evaluations[key]; !found {
+		return nil, nil
+	}
+	resp := e.Evaluations[key][0]
+	if len(e.Evaluations[key]) > 1 {
+		e.Evaluations[key] = e.Evaluations[key][1:]
+	}
+	e.EvaluateCalls[key] += 1
+	if resp.Error != nil {
+		return nil, resp.Error
+	}
+	return resp.Notifications, nil
+}
+
+func (e *mockEvaluator) EvaluateCallsTotal() int {
+	total := 0
+	for _, val := range e.EvaluateCalls {
+		total += val
+	}
+	return total
+}
+
+// mockStaticEvaluator wraps mock evaluator with a static response.
+//
+// Useful when testing Consumer behavior, when the behavior of the Evaluator
+// isn't relevant to the Consumer test.
+type mockStaticEvaluator struct { + *mockEvaluator +} + +func newMockStaticEvaluator() *mockStaticEvaluator { + return &mockStaticEvaluator{newMockEvaluator()} +} + +func (e *mockStaticEvaluator) EvaluateData(ctx context.Context, + followedUserID, dataSetID string) ([]*alerts.Notification, error) { + + e.EvaluateCalls[followedUserID] += 1 + return nil, nil +} + +func newAlertsMockConsumerMessage(topic string, v any) *sarama.ConsumerMessage { + GinkgoHelper() + doc := &struct { + FullDocument any `json:"fullDocument" bson:"fullDocument"` + }{FullDocument: v} + vBytes, err := bson.MarshalExtJSON(doc, false, false) + Expect(err).To(Succeed()) + return &sarama.ConsumerMessage{ + Value: vBytes, + Topic: topic, + } +} + +func addLogger(ctx context.Context) (context.Context, *logtest.Logger) { + GinkgoHelper() + if ctx == nil { + ctx = context.Background() + } + + lgr := logtest.NewLogger() + return log.NewContextWithLogger(ctx, lgr), lgr +} + +func augmentMockMongoDocs(inDocs []interface{}) []interface{} { + defaultDoc := bson.M{ + "_userId": testFollowedUserID, + "_active": true, + "type": "upload", + "time": time.Now(), + } + outDocs := []interface{}{} + for _, inDoc := range inDocs { + newDoc := defaultDoc + switch v := (inDoc).(type) { + case map[string]interface{}: + for key, val := range v { + newDoc[key] = val + } + outDocs = append(outDocs, newDoc) + default: + outDocs = append(outDocs, inDoc) + } + } + return outDocs +} + +func newMockMongoCursor(docs []interface{}) *mongo.Cursor { + GinkgoHelper() + cur, err := mongo.NewCursorFromDocuments(docs, nil, nil) + Expect(err).To(Succeed()) + return cur +} + +type mockAlertsConfigClient struct { + Error error + Configs []*alerts.Config +} + +func newMockAlertsConfigClient(c []*alerts.Config, err error) *mockAlertsConfigClient { + if c == nil { + c = []*alerts.Config{} + } + return &mockAlertsConfigClient{ + Configs: c, + Error: err, + } +} + +func (c *mockAlertsConfigClient) Delete(_ context.Context, _ *alerts.Config) 
error { + return c.Error +} + +func (c *mockAlertsConfigClient) Get(_ context.Context, _ *alerts.Config) (*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs[0], nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) List(_ context.Context, userID string) ([]*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs, nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) Upsert(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +type mockConsumerGroupSession struct { + MarkCalls int +} + +func (s *mockConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MemberID() string { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) Commit() { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + s.MarkCalls++ +} + +func (s *mockConsumerGroupSession) Context() context.Context { + panic("not implemented") // TODO: Implement +} + +type mockPermissionsClient struct { + Error error + Perms map[string]permission.Permissions +} + +func newMockPermissionsClient() *mockPermissionsClient { + return &mockPermissionsClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionsClient) Key(requesterUserID, targetUserID string) string { + return 
requesterUserID + targetUserID +} + +func (c *mockPermissionsClient) Allow(requestUserID, perm, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + c.Perms[key][perm] = permission.Permission{} +} + +func (c *mockPermissionsClient) DenyAll(requestUserID, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + delete(c.Perms, key) +} + +func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestUserID string, targetUserID string) (permission.Permissions, error) { + if c.Error != nil { + return nil, c.Error + } + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +type mockLastCommunicationsRecorder struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockLastCommunicationsRecorder() *mockLastCommunicationsRecorder { + return &mockLastCommunicationsRecorder{ + recordCalls: map[string]int{}, + } +} + +func (r *mockLastCommunicationsRecorder) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil +} + +func (r *mockLastCommunicationsRecorder) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +type mockLastCommunicationsRepository struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockLastCommunicationsRepository() *mockLastCommunicationsRepository { + return &mockLastCommunicationsRepository{ + recordCalls: map[string]int{}, + } +} + +func (r *mockLastCommunicationsRepository) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil 
+} + +func (r *mockLastCommunicationsRepository) OverdueCommunications(ctx context.Context) ( + []alerts.LastCommunication, error) { + + return nil, nil +} + +func (r *mockLastCommunicationsRepository) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +func (r *mockLastCommunicationsRepository) EnsureIndexes() error { return nil } + +type mockDeviceTokens struct { + Tokens map[string][]*devicetokens.DeviceToken +} + +func newMockDeviceTokens() *mockDeviceTokens { + return &mockDeviceTokens{ + Tokens: map[string][]*devicetokens.DeviceToken{}, + } +} + +func (t *mockDeviceTokens) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if tokens, found := t.Tokens[userID]; found { + return tokens, nil + } + return nil, nil +} + +func newTestStaticDatumMmolL(value float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + UserID: pointer.FromAny(testFollowedUserID), + Time: pointer.FromTime(time.Now()), + UploadID: pointer.FromAny(testDataSetID), + }, + Units: pointer.FromString(dataBloodGlucose.MmolL), + Value: pointer.FromFloat64(value), + }, + } +} diff --git a/data/events/events.go b/data/events/events.go index 3e41a0630d..e4795b93f1 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -1,15 +1,26 @@ package events import ( + "bytes" "context" + "fmt" + "log/slog" + "os" + "strconv" + "sync" + "time" + "github.com/IBM/sarama" + "github.com/tidepool-org/go-common/asyncevents" ev "github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataStore "github.com/tidepool-org/platform/data/store" summaryStore "github.com/tidepool-org/platform/data/summary/store" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" + logjson 
"github.com/tidepool-org/platform/log/json" ) type userDeletionEventsHandler struct { @@ -58,3 +69,505 @@ func (u *userDeletionEventsHandler) HandleDeleteUserEvent(payload ev.DeleteUserE } return nil } + +// AlertsEventRetryDelayMaximum is the maximum delay between consumption +// retries. +const AlertsEventRetryDelayMaximum = time.Minute + +// AlertsEventRetries is the maximum consumption attempts before giving up. +const AlertsEventRetries = 1000 + +// AlertsEventConsumptionTimeout is the maximum time to process an alerts event. +const AlertsEventConsumptionTimeout = 30 * time.Second + +// SaramaRunner interfaces between [events.Runner] and go-common's +// [asyncevents.SaramaEventsConsumer]. +// +// This means providing Initialize(), Run(), and Terminate() to satisfy events.Runner, while +// under the hood calling SaramaEventConsumer's Run(), and canceling its Context as +// appropriate. +type SaramaRunner struct { + eventsRunner SaramaEventsRunner + cancelCtx context.CancelFunc + cancelMu sync.Mutex +} + +func NewSaramaRunner(eventsRunner SaramaEventsRunner) *SaramaRunner { + return &SaramaRunner{ + eventsRunner: eventsRunner, + } +} + +// SaramaEventsRunner is implemented by go-common's [asyncevents.SaramaEventsRunner]. +type SaramaEventsRunner interface { + Run(ctx context.Context) error +} + +// SaramaRunnerConfig collects values needed to initialize a SaramaRunner. +// +// This provides isolation for the SaramaRunner from ConfigReporter, +// envconfig, or any of the other options in platform for reading config +// values. +type SaramaRunnerConfig struct { + Brokers []string + GroupID string + Topics []string + MessageConsumer asyncevents.SaramaMessageConsumer + + Sarama *sarama.Config +} + +func (r *SaramaRunner) Initialize() error { return nil } + +// Run adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. 
+func (r *SaramaRunner) Run() error { + if r.eventsRunner == nil { + return errors.New("Unable to run SaramaRunner, eventsRunner is nil") + } + + r.cancelMu.Lock() + ctx, err := func() (context.Context, error) { + defer r.cancelMu.Unlock() + if r.cancelCtx != nil { + return nil, errors.New("Unable to Run SaramaRunner, it's already initialized") + } + var ctx context.Context + ctx, r.cancelCtx = context.WithCancel(context.Background()) + return ctx, nil + }() + if err != nil { + return err + } + if err := r.eventsRunner.Run(ctx); err != nil { + return errors.Wrap(err, "Unable to Run SaramaRunner") + } + return nil +} + +// Terminate adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Terminate() error { + r.cancelMu.Lock() + defer r.cancelMu.Unlock() + if r.cancelCtx == nil { + return errors.New("Unable to Terminate SaramaRunner, it's not running") + } + r.cancelCtx() + return nil +} + +// CappedExponentialBinaryDelay builds delay functions that use exponential +// binary backoff with a maximum duration. +func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { + return func(tries int) time.Duration { + b := asyncevents.DelayExponentialBinary(tries) + if b > cap { + return cap + } + return b + } +} + +type AlertsEventsConsumer struct { + Consumer asyncevents.SaramaMessageConsumer +} + +func (c *AlertsEventsConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error { + err := c.Consumer.Consume(ctx, session, message) + if err != nil { + session.MarkMessage(message, fmt.Sprintf("I have given up after error: %s", err)) + return err + } + return nil +} + +// CascadingSaramaEventsRunner manages multiple sarama consumer groups to execute a +// topic-cascading retry process. +// +// The topic names are generated from Config.Topics combined with Delays. 
If given a single +// topic "updates", and delays: 0s, 1s, and 5s, then the following topics will be consumed: +// updates, updates-retry-1s, updates-retry-5s. The consumer of the updates-retry-5s topic +// will write failed messages to updates-dead. +// +// The inspiration for this system was drawn from +// https://www.uber.com/blog/reliable-reprocessing/ +type CascadingSaramaEventsRunner struct { + Config SaramaRunnerConfig + Delays []time.Duration + Logger log.Logger + SaramaBuilders SaramaBuilders +} + +func NewCascadingSaramaEventsRunner(config SaramaRunnerConfig, logger log.Logger, + delays []time.Duration) *CascadingSaramaEventsRunner { + + return &CascadingSaramaEventsRunner{ + Config: config, + Delays: delays, + Logger: logger, + SaramaBuilders: DefaultSaramaBuilders{}, + } +} + +// LimitedAsyncProducer restricts the [sarama.AsyncProducer] interface to ensure that its +// recipient isn't able to call Close(), thereby opening the potential for a panic when +// writing to a closed channel. +type LimitedAsyncProducer interface { + AbortTxn() error + BeginTxn() error + CommitTxn() error + Input() chan<- *sarama.ProducerMessage +} + +func (r *CascadingSaramaEventsRunner) Run(ctx context.Context) error { + if len(r.Config.Topics) == 0 { + return errors.New("no topics") + } + if len(r.Delays) == 0 { + return errors.New("no delays") + } + + producersCtx, cancel := context.WithCancel(ctx) + defer cancel() + var wg sync.WaitGroup + errs := make(chan error, len(r.Config.Topics)*len(r.Delays)) + defer func() { + r.logger(ctx).Debug("CascadingSaramaEventsRunner: waiting for consumers") + wg.Wait() + r.logger(ctx).Debug("CascadingSaramaEventsRunner: all consumers returned") + close(errs) + }() + + for _, topic := range r.Config.Topics { + for idx, delay := range r.Delays { + producerCfg := r.producerConfig(idx, delay) + // The producer is built here rather than in buildConsumer() to control when + // producer is closed. 
Were the producer to be closed before consumer.Run() + // returns, it would be possible for consumer to write to the producer's + // Inputs() channel, which if closed, would cause a panic. + producer, err := r.SaramaBuilders.NewAsyncProducer(r.Config.Brokers, producerCfg) + if err != nil { + return errors.Wrapf(err, "Unable to build async producer: %s", r.Config.GroupID) + } + + consumer, err := r.buildConsumer(producersCtx, idx, producer, delay, topic) + if err != nil { + return err + } + + wg.Add(1) + go func(topic string) { + defer func() { wg.Done(); producer.Close() }() + if err := consumer.Run(producersCtx); err != nil { + errs <- fmt.Errorf("topics[%q]: %s", topic, err) + } + r.logger(ctx).WithField("topic", topic). + Debug("CascadingSaramaEventsRunner: consumer go proc returning") + }(topic) + } + } + + select { + case <-ctx.Done(): + r.logger(ctx).Debug("CascadingSaramaEventsRunner: context is done") + return nil + case err := <-errs: + r.logger(ctx).WithError(err). + Debug("CascadingSaramaEventsRunner: Run(): error from consumer") + return err + } +} + +func (r *CascadingSaramaEventsRunner) producerConfig(idx int, delay time.Duration) *sarama.Config { + uniqueConfig := *r.Config.Sarama + hostID := os.Getenv("HOSTNAME") // set by default in kubernetes pods + if hostID == "" { + hostID = fmt.Sprintf("%d-%d", time.Now().UnixNano()/int64(time.Second), os.Getpid()) + } + txnID := fmt.Sprintf("%s-%s-%d-%s", r.Config.GroupID, delay.String(), idx, hostID) + uniqueConfig.Producer.Transaction.ID = txnID + uniqueConfig.Producer.Idempotent = true + uniqueConfig.Producer.RequiredAcks = sarama.WaitForAll + uniqueConfig.Net.MaxOpenRequests = 1 + uniqueConfig.Consumer.IsolationLevel = sarama.ReadCommitted + return &uniqueConfig +} + +// SaramaBuilders allows tests to inject mock objects. 
+type SaramaBuilders interface { + NewAsyncProducer([]string, *sarama.Config) (sarama.AsyncProducer, error) + NewConsumerGroup([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) +} + +// DefaultSaramaBuilders implements SaramaBuilders for normal, non-test use. +type DefaultSaramaBuilders struct{} + +func (DefaultSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) ( + sarama.AsyncProducer, error) { + + return sarama.NewAsyncProducer(brokers, config) +} + +func (DefaultSaramaBuilders) NewConsumerGroup(brokers []string, groupID string, + config *sarama.Config) (sarama.ConsumerGroup, error) { + + return sarama.NewConsumerGroup(brokers, groupID, config) +} + +func (r *CascadingSaramaEventsRunner) buildConsumer(ctx context.Context, idx int, + producer LimitedAsyncProducer, delay time.Duration, baseTopic string) ( + *asyncevents.SaramaEventsConsumer, error) { + + groupID := r.Config.GroupID + if delay > 0 { + groupID += "-retry-" + delay.String() + } + group, err := r.SaramaBuilders.NewConsumerGroup(r.Config.Brokers, groupID, + r.Config.Sarama) + if err != nil { + return nil, errors.Wrapf(err, "Unable to build sarama consumer group: %s", groupID) + } + + var consumer asyncevents.SaramaMessageConsumer = r.Config.MessageConsumer + if len(r.Delays) > 0 { + nextTopic := baseTopic + "-dead" + if idx+1 < len(r.Delays) { + nextTopic = baseTopic + "-retry-" + r.Delays[idx+1].String() + } + consumer = &CascadingConsumer{ + Consumer: consumer, + NextTopic: nextTopic, + Producer: producer, + Logger: r.Logger, + } + } + if delay > 0 { + consumer = &NotBeforeConsumer{ + Consumer: consumer, + Logger: r.Logger, + } + } + aeLoggerAdapter := &asynceventsLoggerAdapter{r.Logger} + handler := asyncevents.NewSaramaConsumerGroupHandler(aeLoggerAdapter, consumer, + AlertsEventConsumptionTimeout) + topic := baseTopic + if delay > 0 { + topic += "-retry-" + delay.String() + } + r.logger(ctx).WithField("topic", topic).Debug("creating consumer") + + return 
asyncevents.NewSaramaEventsConsumer(group, handler, topic), nil
+}
+
+func (r *CascadingSaramaEventsRunner) logger(ctx context.Context) log.Logger {
+	// A context logger might have more fields or ... context. So prefer that if available.
+	if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil {
+		return ctxLogger
+	}
+	if r.Logger == nil {
+		// logjson.NewLogger will only fail if an argument is missing.
+		r.Logger, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel())
+	}
+	return r.Logger
+}
+
+// NotBeforeConsumer delays consumption until a specified time.
+type NotBeforeConsumer struct {
+	Consumer asyncevents.SaramaMessageConsumer
+	Logger   log.Logger
+}
+
+func (c *NotBeforeConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession,
+	msg *sarama.ConsumerMessage) error {
+
+	notBefore, err := c.notBeforeFromMsgHeaders(msg)
+	if err != nil {
+		c.Logger.WithError(err).Info("Unable to parse kafka header not-before value")
+	}
+	delay := time.Until(notBefore)
+
+	select {
+	case <-ctx.Done():
+		if ctxErr := ctx.Err(); ctxErr != context.Canceled {
+			return ctxErr
+		}
+		return nil
+	case <-time.After(time.Until(notBefore)):
+		if !notBefore.IsZero() {
+			fields := log.Fields{"topic": msg.Topic, "not-before": notBefore, "delay": delay}
+			c.Logger.WithFields(fields).Debugf("delayed")
+		}
+		return c.Consumer.Consume(ctx, session, msg)
+	}
+}
+
+// HeaderNotBefore tells consumers not to consume a message before a certain time.
+var HeaderNotBefore = []byte("x-tidepool-not-before")
+
+// NotBeforeTimeFormat specifies the [time.Parse] format to use for HeaderNotBefore.
+var NotBeforeTimeFormat = time.RFC3339Nano
+
+// HeaderFailures counts the number of failures encountered trying to consume the message.
+var HeaderFailures = []byte("x-tidepool-failures")
+
+// FailuresToDelay maps the number of consumption failures to the next delay.
+// +// Rather than using a failures header, the name of the topic could be used as a lookup, if +// so desired. +var FailuresToDelay = map[int]time.Duration{ + 0: 0, + 1: 1 * time.Second, + 2: 2 * time.Second, + 3: 3 * time.Second, + 4: 5 * time.Second, +} + +func (c *NotBeforeConsumer) notBeforeFromMsgHeaders(msg *sarama.ConsumerMessage) ( + time.Time, error) { + + for _, header := range msg.Headers { + if bytes.Equal(header.Key, HeaderNotBefore) { + notBefore, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return time.Time{}, fmt.Errorf("parsing not before header: %s", err) + } else { + return notBefore, nil + } + } + } + return time.Time{}, fmt.Errorf("header not found: x-tidepool-not-before") +} + +// CascadingConsumer cascades messages that failed to be consumed to another topic. +// +// It also sets an adjustable delay via the "not-before" and "failures" headers so that as +// the message moves from topic to topic, the time between processing is increased according +// to [FailuresToDelay]. +type CascadingConsumer struct { + Consumer asyncevents.SaramaMessageConsumer + NextTopic string + Producer LimitedAsyncProducer + Logger log.Logger +} + +func (c *CascadingConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, + msg *sarama.ConsumerMessage) (err error) { + + if err := c.Consumer.Consume(ctx, session, msg); err != nil { + txnErr := c.withTxn(func() error { + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != context.Canceled { + return ctxErr + } + return nil + case c.Producer.Input() <- c.cascadeMessage(msg): + fields := log.Fields{"from": msg.Topic, "to": c.NextTopic} + c.Logger.WithFields(fields).Debug("cascaded") + return nil + } + }) + if txnErr != nil { + c.Logger.WithError(txnErr).Info("Unable to complete cascading transaction") + return err + } + } + return nil +} + +// withTxn wraps a function with a transaction that is aborted if an error is returned. 
+func (c *CascadingConsumer) withTxn(f func() error) (err error) { + if err := c.Producer.BeginTxn(); err != nil { + return errors.Wrap(err, "Unable to begin transaction") + } + defer func(err *error) { + if err != nil && *err != nil { + if abortErr := c.Producer.AbortTxn(); abortErr != nil { + c.Logger.WithError(abortErr).Info("Unable to abort transaction") + } + return + } + if commitErr := c.Producer.CommitTxn(); commitErr != nil { + c.Logger.WithError(commitErr).Info("Unable to commit transaction") + } + }(&err) + return f() +} + +// cascadeMessage to the next topic. +func (c *CascadingConsumer) cascadeMessage(msg *sarama.ConsumerMessage) *sarama.ProducerMessage { + pHeaders := make([]sarama.RecordHeader, len(msg.Headers)) + for idx, header := range msg.Headers { + pHeaders[idx] = *header + } + return &sarama.ProducerMessage{ + Key: sarama.ByteEncoder(msg.Key), + Value: sarama.ByteEncoder(msg.Value), + Topic: c.NextTopic, + Headers: c.updateCascadeHeaders(pHeaders), + } +} + +// updateCascadeHeaders calculates not before and failures header values. +// +// Existing not before and failures headers will be dropped in place of the new ones. +func (c *CascadingConsumer) updateCascadeHeaders(headers []sarama.RecordHeader) []sarama.RecordHeader { + failures := 0 + notBefore := time.Now() + + keep := make([]sarama.RecordHeader, 0, len(headers)) + for _, header := range headers { + switch { + case bytes.Equal(header.Key, HeaderNotBefore): + continue // Drop this header, we'll add a new version below. + case bytes.Equal(header.Key, HeaderFailures): + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + if err != nil { + c.Logger.WithError(err).Info("Unable to parse consumption failures count") + } else { + failures = int(parsed) + notBefore = notBefore.Add(FailuresToDelay[failures]) + } + continue // Drop this header, we'll add a new version below. 
+ } + keep = append(keep, header) + } + + keep = append(keep, sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + keep = append(keep, sarama.RecordHeader{ + Key: HeaderFailures, + Value: []byte(strconv.Itoa(failures + 1)), + }) + + return keep +} + +type LastCommunicationsRecorder interface { + // RecordReceivedDeviceData to support sending care partner alerts. + // + // Metadata about when we last received data for any given user is + // used to determine if alerts should be sent to the care partners + // of a given user. + RecordReceivedDeviceData(context.Context, alerts.LastCommunication) error +} + +// asynceventsLoggerAdapter adapts a [log.Logger] to [asyncevents.Logger]. +type asynceventsLoggerAdapter struct { + log.Logger +} + +var logLevels map[slog.Level]log.Level = map[slog.Level]log.Level{ + slog.LevelDebug: log.DebugLevel, + slog.LevelInfo: log.InfoLevel, + slog.LevelWarn: log.WarnLevel, + slog.LevelError: log.ErrorLevel, +} + +func (a *asynceventsLoggerAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + a.Logger.Log(logLevels[level], fmt.Sprintf(msg, args...)) +} diff --git a/data/events/events_suite_test.go b/data/events/events_suite_test.go new file mode 100644 index 0000000000..4bab08b129 --- /dev/null +++ b/data/events/events_suite_test.go @@ -0,0 +1,34 @@ +package events + +import ( + "log/slog" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} + +var _ = BeforeSuite(func() { + slog.SetDefault(devNullSlogLogger(GinkgoT())) +}) + +// Cleaner is part of testing.T and FullGinkgoTInterface +type Cleaner interface { + Cleanup(func()) +} + +func devNullSlogLogger(c Cleaner) *slog.Logger { + f, err := os.Open(os.DevNull) + Expect(err).To(Succeed()) + c.Cleanup(func() { + Expect(f.Close()).To(Succeed()) + }) + return slog.New(slog.NewTextHandler(f, nil)) +} diff --git a/data/events/events_test.go b/data/events/events_test.go new file mode 100644 index 0000000000..9e8036a54d --- /dev/null +++ b/data/events/events_test.go @@ -0,0 +1,747 @@ +package events + +import ( + "bytes" + "context" + "fmt" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/IBM/sarama" + "github.com/IBM/sarama/mocks" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log/devlog" + lognull "github.com/tidepool-org/platform/log/null" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("SaramaRunner", func() { + Context("has a lifecycle", func() { + newTestRunner := func() *SaramaRunner { + return NewSaramaRunner(&mockEventsRunner{}) + } + It("starts with Run() and stops with Terminate()", func() { + r := newTestRunner() + var runErr error + var errMu sync.Mutex + launched := make(chan struct{}, 1) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + runErr = r.Run() + launched <- struct{}{} + }() + }() + <-launched + time.Sleep(time.Millisecond) + errMu.Lock() + defer errMu.Unlock() + + Expect(r.Terminate()).To(Succeed()) + Eventually(runErr).WithTimeout(10 * time.Millisecond).Should(Succeed()) + }) + + Describe("Run()", func() { + var errMu sync.Mutex + + It("can be started only once", func() { + r := newTestRunner() + var firstRunErr, secondRunErr error + launched := make(chan struct{}, 
2) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + firstRunErr = r.Run() + launched <- struct{}{} + }() + }() + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + secondRunErr = r.Run() + launched <- struct{}{} + }() + + }() + <-launched + <-launched + errMu.Lock() + defer errMu.Unlock() + + // The above doesn't _guarantee_ that Run has been called twice, + // but... it should work. + + Expect(r.Terminate()).To(Succeed()) + if firstRunErr != nil { + Expect(firstRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + Expect(secondRunErr).To(Succeed()) + } else { + Expect(firstRunErr).To(Succeed()) + Expect(secondRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + } + }) + + It("can't be Terminate()'d before it's Run()", func() { + r := newTestRunner() + Expect(r.Terminate()).To(MatchError(ContainSubstring("it's not running"))) + }) + }) + }) +}) + +var _ = DescribeTable("CappedExponentialBinaryDelay", + func(cap time.Duration, input int, output time.Duration) { + f := CappedExponentialBinaryDelay(cap) + Expect(f(input)).To(Equal(output)) + }, + Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), + Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), + Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), + Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), + Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), + Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), + Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), + Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), +) + +var _ = Describe("NotBeforeConsumer", func() { + Describe("Consume", func() { + var newTestMsg = func(notBefore time.Time) *sarama.ConsumerMessage { + headers := []*sarama.RecordHeader{} + if !notBefore.IsZero() { + headers = append(headers, &sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + } + return 
&sarama.ConsumerMessage{Topic: "test.topic", Headers: headers} + } + + It("delays based on the x-tidepool-not-before header", func() { + logger := newTestDevlog() + testDelay := 10 * time.Millisecond + ctx := context.Background() + start := time.Now() + notBefore := start.Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + Logger: logger, + } + + err := dc.Consume(ctx, nil, msg) + + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", testDelay)) + }) + + It("aborts if canceled", func() { + logger := newTestDevlog() + testDelay := 10 * time.Millisecond + abortAfter := 1 * time.Millisecond + notBefore := time.Now().Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Logger: logger, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + <-time.After(abortAfter) + }() + start := time.Now() + + err := dc.Consume(ctx, nil, msg) + + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + + }) +}) + +var _ = Describe("CascadingConsumer", func() { + Describe("Consume", func() { + var testMsg = &sarama.ConsumerMessage{ + Topic: "test.topic", + } + + Context("on failure", func() { + It("cascades topics", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + if msg.Topic != nextTopic { + return fmt.Errorf("expected topic to be %q, got %q", nextTopic, msg.Topic) + } + return 
nil + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + + It("increments the failures header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("3"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + failures := 0 + for _, header := range msg.Headers { + if !bytes.Equal(header.Key, HeaderFailures) { + continue + } + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + Expect(err).To(Succeed()) + failures = int(parsed) + if failures != 4 { + return fmt.Errorf("expected failures == 4, got %d", failures) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + + It("updates the not before header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("2"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg 
*sarama.ProducerMessage) error { + for _, header := range msg.Headers { + if !bytes.Equal(header.Key, HeaderNotBefore) { + continue + } + parsed, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return err + } + until := time.Until(parsed) + delta := 10 * time.Millisecond + if until < 2*time.Second-delta || until > 2*time.Second+delta { + return fmt.Errorf("expected 2 seconds' delay, got: %s", until) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("on success", func() { + It("doesn't produce a new message", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("when canceled", func() { + It("aborts", func() { + logger := newTestDevlog() + abortAfter := 1 * time.Millisecond + p := newMockSaramaAsyncProducer(nil) + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Logger: lognull.NewLogger(), + Producer: p, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + time.Sleep(abortAfter) + }() + start := time.Now() + + err := sc.Consume(ctx, nil, testMsg) + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + }) + }) +}) + +var _ = Describe("CascadingSaramaEventsRunner", func() { + It("cascades through configured 
delays", func() { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + testDelays := []time.Duration{0, 1, 2, 3, 5} + testLogger := newTestDevlog() + testMessageConsumer := &mockSaramaMessageConsumer{ + Delay: time.Millisecond, + Err: fmt.Errorf("test error"), + Logger: testLogger, + } + testConfig := SaramaRunnerConfig{ + Topics: []string{"test.cascading"}, + MessageConsumer: testMessageConsumer, + Sarama: mocks.NewTestConfig(), + } + producers := []*mockSaramaAsyncProducer{} + var msgsReceived atomic.Int32 + prodFunc := func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + prod := newMockSaramaAsyncProducer(func(msg *sarama.ProducerMessage) { + msgsReceived.Add(1) + if int(msgsReceived.Load()) == len(testDelays) { + // Once all messages are entered, the test is complete. Cancel the + // context to shut it all down properly. + cancel() + } + }) + producers = append(producers, prod) + return prod, nil + } + sser := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + sser.SaramaBuilders = newTestSaramaBuilders(nil, prodFunc) + + err := sser.Run(ctx) + Expect(err).To(Succeed()) + for pIdx, p := range producers { + Expect(p.isClosed()).To(BeTrue()) + Expect(p.messages).To(HaveLen(1)) + topic := p.messages[0].Topic + switch { + case pIdx+1 < len(testDelays): + Expect(topic).To(MatchRegexp(fmt.Sprintf(".*-retry-%s$", testDelays[pIdx+1]))) + default: + Expect(topic).To(MatchRegexp(".*-dead$")) + } + } + }) + + Describe("logger", func() { + It("prefers a context's logger", func() { + testLogger := logtest.NewLogger() + ctxLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := log.NewContextWithLogger(context.Background(), ctxLogger) + got := r.logger(ctx) + + Expect(got).To(Equal(ctxLogger)) + }) + + Context("without a context logger", func() { + It("uses the 
configured logger", func() { + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := context.Background() + got := r.logger(ctx) + + Expect(got).To(Equal(testLogger)) + }) + + Context("or any configured logger", func() { + It("doesn't panic", func() { + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + + ctx := context.Background() + got := r.logger(ctx) + + Expect(func() { + got.Debug("testing") + }).ToNot(Panic()) + }) + }) + }) + }) +}) + +// testSaramaBuilders injects mocks into the CascadingSaramaEventsRunner +type testSaramaBuilders struct { + consumerGroup func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) + producer func([]string, *sarama.Config) (sarama.AsyncProducer, error) +} + +func newTestSaramaBuilders( + cgFunc func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error), + prodFunc func([]string, *sarama.Config) (sarama.AsyncProducer, error)) *testSaramaBuilders { + + if cgFunc == nil { + cgFunc = func(_ []string, groupID string, config *sarama.Config) (sarama.ConsumerGroup, error) { + logger := newTestDevlog() + return &mockSaramaConsumerGroup{ + Logger: logger, + }, nil + } + } + if prodFunc == nil { + prodFunc = func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + return mocks.NewAsyncProducer(GinkgoT(), config), nil + } + } + return &testSaramaBuilders{ + consumerGroup: cgFunc, + producer: prodFunc, + } +} + +func (b testSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) ( + sarama.AsyncProducer, error) { + + return b.producer(brokers, config) +} + +func (b testSaramaBuilders) NewConsumerGroup(brokers []string, groupID string, + config *sarama.Config) (sarama.ConsumerGroup, error) { + + return 
b.consumerGroup(brokers, groupID, config) +} + +type mockEventsRunner struct { + Err error +} + +func (r *mockEventsRunner) Run(ctx context.Context) error { + return r.Err +} + +type mockSaramaMessageConsumer struct { + Delay time.Duration + Err error + Logger log.Logger +} + +func (c *mockSaramaMessageConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + c.Logger.Debugf("mockSaramaMessageConsumer[%q] is consuming %+v", msg.Topic, msg) + defer func(err *error) { + c.Logger.Debugf("mockSaramaMessageConsumer[%q] returns %s", msg.Topic, *err) + }(&err) + + done := ctx.Done() + select { + case <-time.After(c.Delay): + // no op + case <-done: + return ctx.Err() + } + + if c.Err != nil { + return c.Err + } + return nil +} + +type mockSaramaConsumerGroup struct { + Messages chan *sarama.ConsumerMessage + ConsumeErr error + Logger log.Logger +} + +func (g *mockSaramaConsumerGroup) Consume(ctx context.Context, + topics []string, handler sarama.ConsumerGroupHandler) error { + + if g.ConsumeErr != nil { + return g.ConsumeErr + } + + g.Logger.Debugf("mockSaramaConsumerGroup%s consuming", topics) + session := &mockSaramaConsumerGroupSession{} + if g.Messages == nil { + g.Messages = make(chan *sarama.ConsumerMessage) + go func() { <-ctx.Done(); close(g.Messages) }() + go g.feedYourClaim(ctx, topics[0]) + } + claim := &mockSaramaConsumerGroupClaim{ + topic: topics[0], + messages: g.Messages, + } + + err := handler.ConsumeClaim(session, claim) + if err != nil { + return err + } + return nil +} + +func (g *mockSaramaConsumerGroup) feedYourClaim(ctx context.Context, topic string) { + msg := &sarama.ConsumerMessage{Topic: topic} + select { + case <-ctx.Done(): + return + case g.Messages <- msg: + // no op + } +} + +func (g *mockSaramaConsumerGroup) Errors() <-chan error { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Close() error { + panic("not implemented") // 
implement if needed
+}
+
+func (g *mockSaramaConsumerGroup) Pause(partitions map[string][]int32) {
+	panic("not implemented") // implement if needed
+}
+
+func (g *mockSaramaConsumerGroup) Resume(partitions map[string][]int32) {
+	panic("not implemented") // implement if needed
+}
+
+func (g *mockSaramaConsumerGroup) PauseAll() {
+	panic("not implemented") // implement if needed
+}
+
+func (g *mockSaramaConsumerGroup) ResumeAll() {
+	panic("not implemented") // implement if needed
+}
+
+type mockSaramaConsumerGroupSession struct{}
+
+func (s *mockSaramaConsumerGroupSession) Claims() map[string][]int32 {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) MemberID() string {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) GenerationID() int32 {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) Commit() {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) {
+	panic("not implemented") // implement if needed
+}
+
+func (s *mockSaramaConsumerGroupSession) Context() context.Context {
+	return context.Background()
+}
+
+type mockSaramaConsumerGroupClaim struct {
+	messages <-chan *sarama.ConsumerMessage
+	topic    string
+}
+
+func (c *mockSaramaConsumerGroupClaim) Topic() string {
+	return c.topic
+}
+
+func (c *mockSaramaConsumerGroupClaim) Partition() int32 {
+	panic("not implemented") // implement if needed
+}
+
+func (c *mockSaramaConsumerGroupClaim) InitialOffset() int64 {
+	panic("not 
implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) HighWaterMarkOffset() int64 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { + return c.messages +} + +type mockSaramaAsyncProducer struct { + input chan *sarama.ProducerMessage + messages []*sarama.ProducerMessage + mu sync.Mutex + setupCallbacksOnce sync.Once + closeOnce sync.Once + msgCallback func(*sarama.ProducerMessage) +} + +func newMockSaramaAsyncProducer(msgCallback func(*sarama.ProducerMessage)) *mockSaramaAsyncProducer { + return &mockSaramaAsyncProducer{ + input: make(chan *sarama.ProducerMessage), + messages: []*sarama.ProducerMessage{}, + msgCallback: msgCallback, + } +} + +func (p *mockSaramaAsyncProducer) AsyncClose() { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) Close() error { + p.closeOnce.Do(func() { close(p.input) }) + return nil +} + +func (p *mockSaramaAsyncProducer) setupCallbacks() { + if p.msgCallback == nil { + return + } + p.setupCallbacksOnce.Do(func() { + go func(callback func(*sarama.ProducerMessage)) { + for msg := range p.input { + p.messages = append(p.messages, msg) + go callback(msg) + } + }(p.msgCallback) + }) +} + +func (p *mockSaramaAsyncProducer) Input() chan<- *sarama.ProducerMessage { + defer p.setupCallbacks() + return p.input +} + +func (p *mockSaramaAsyncProducer) Successes() <-chan *sarama.ProducerMessage { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) Errors() <-chan *sarama.ProducerError { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) IsTransactional() bool { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) BeginTxn() error { + return nil +} + 
+func (p *mockSaramaAsyncProducer) CommitTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AbortTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) isClosed() bool { + p.mu.Lock() + defer p.mu.Unlock() + select { + case _, open := <-p.input: + return !open + default: + return false + } +} + +func newTestDevlog() log.Logger { + GinkgoHelper() + l, err := devlog.NewWithDefaults(GinkgoWriter) + Expect(err).To(Succeed()) + return l +} diff --git a/data/events/last_communications_recorder.go b/data/events/last_communications_recorder.go new file mode 100644 index 0000000000..96e6ec218c --- /dev/null +++ b/data/events/last_communications_recorder.go @@ -0,0 +1,41 @@ +package events + +import ( + "context" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + lognull "github.com/tidepool-org/platform/log/null" +) + +type LastCommunicationRecorder struct { + Repo alerts.LastCommunicationsRepository +} + +func NewLastCommunicationRecorder(repo alerts.LastCommunicationsRepository) *LastCommunicationRecorder { + return &LastCommunicationRecorder{ + Repo: repo, + } +} + +func (r *LastCommunicationRecorder) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + logger := r.log(ctx).WithFields(log.Fields{ + "userID": lastComm.UserID, + "dataSetID": lastComm.DataSetID, + }) + logger.Info("recording received data") + if err := r.Repo.RecordReceivedDeviceData(ctx, lastComm); err != nil { + return errors.Wrap(err, "Unable to record metadata on reception of device data") + } + 
return nil +} + +func (r *LastCommunicationRecorder) log(ctx context.Context) log.Logger { + if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return ctxLogger + } + return lognull.NewLogger() +} diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index d07891247e..39371ad4de 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -24,6 +24,8 @@ func AlertsRoutes() []service.Route { service.Get("/v1/users/:userId/followers/:followerUserId/alerts", GetAlert, api.RequireAuth), service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), + service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), + service.Get("/v1/users/overdue_communications", ListOverdueCommunications, api.RequireServer), } } @@ -114,8 +116,8 @@ func UpsertAlert(dCtx service.Context) { return } - a := &alerts.Alerts{} - if err := request.DecodeRequestBody(r.Request, a); err != nil { + cfg := &alerts.Config{} + if err := request.DecodeRequestBody(r.Request, cfg); err != nil { dCtx.RespondWithError(platform.ErrorJSONMalformed()) return } @@ -126,7 +128,6 @@ func UpsertAlert(dCtx service.Context) { return } - cfg := &alerts.Config{UserID: path.UserID, FollowedUserID: path.FollowedUserID, Alerts: *a} if err := repo.Upsert(ctx, cfg); err != nil { dCtx.RespondWithError(platform.ErrorInternalServerFailure()) lgr.WithError(err).Error("upserting alerts config") @@ -134,6 +135,67 @@ func UpsertAlert(dCtx service.Context) { } } +func ListAlerts(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + authDetails := request.GetAuthDetails(ctx) + repo := dCtx.AlertsRepository() + lgr := log.LoggerFromContext(ctx) + + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + 
pathsUserID := r.PathParam("userId") + if err := checkUserIDConsistency(authDetails, pathsUserID); err != nil { + lgr.WithFields(log.Fields{"path": pathsUserID, "auth": authDetails.UserID()}). + Debug("user id consistency failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + alerts, err := repo.List(ctx, pathsUserID) + if err != nil { + dCtx.RespondWithInternalServerFailure("listing alerts configs", err) + lgr.WithError(err).Error("listing alerts config") + return + } + if len(alerts) == 0 { + dCtx.RespondWithError(ErrorUserIDNotFound(pathsUserID)) + lgr.Debug("no alerts configs found") + } + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, alerts) +} + +func ListOverdueCommunications(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + + authDetails := request.GetAuthDetails(ctx) + lgr := log.LoggerFromContext(ctx) + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + overdue, err := dCtx.LastCommunicationsRepository().OverdueCommunications(ctx) + if err != nil { + lgr.WithError(err).Debug("Unable to list overdue records") + dCtx.RespondWithError(platform.ErrorInternalServerFailure()) + return + } + + lgr.WithField("found", len(overdue)).WithField("overdue", overdue). + Debug("/v1/users/overdue_communications") + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, overdue) +} + // checkUserIDConsistency verifies the userIDs in a request. // // For safety reasons, if these values don't agree, return an error. @@ -150,7 +212,7 @@ func checkUserIDConsistency(details request.AuthDetails, userIDFromPath string) // checkAuthentication ensures that the request has an authentication token. 
func checkAuthentication(details request.AuthDetails) error { - if details.Token() == "" { + if details.HasToken() && details.Token() == "" { return platformerrors.New("unauthorized") } if details.IsUser() { diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index c3b4b2f2a5..f517c224a7 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -3,7 +3,10 @@ package v1 import ( "bytes" "context" + "encoding/json" + "fmt" "net/http" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -17,10 +20,15 @@ import ( "github.com/tidepool-org/platform/service/test" ) +var testUserID = mocks.TestUserID1 +var testFollowedUserID = mocks.TestUserID2 + +const testDataSetID = "upid_000000000000" + func permsNoFollow() map[string]map[string]permission.Permissions { return map[string]map[string]permission.Permissions{ mocks.TestUserID1: { - mocks.TestUserID2: { + testFollowedUserID: { permission.Read: map[string]interface{}{}, }, }, @@ -32,11 +40,11 @@ var _ = Describe("Alerts endpoints", func() { testAuthenticationRequired := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() badDetails := test.NewMockAuthDetails(request.MethodSessionToken, "", "") dCtx.WithAuthDetails(badDetails) @@ -49,11 +57,12 @@ var _ = Describe("Alerts endpoints", func() { testUserHasFollowPermission := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, })) dCtx := mocks.NewContext(t, "", 
"", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() dCtx.MockPermissionClient = mocks.NewPermission(permsNoFollow(), nil, nil) f(dCtx) @@ -69,7 +78,7 @@ var _ = Describe("Alerts endpoints", func() { dCtx.WithAuthDetails(details) } dCtx.RESTRequest.PathParams["followerUserId"] = "bad" - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -82,7 +91,7 @@ var _ = Describe("Alerts endpoints", func() { t := GinkgoT() body := bytes.NewBuffer([]byte(`"improper JSON data"`)) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -103,6 +112,24 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(DeleteAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + }, + } + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + DeleteAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) Describe("Upsert", func() { @@ -121,8 +148,65 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(UpsertAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + testCfg, _ := json.Marshal(testConfig()) + dCtx := mocks.NewContext(t, "", "", bytes.NewBuffer(testCfg)) + dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + UpsertAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) + Describe("ListAlerts", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(ListAlerts) + }) + + It("requires that the user's token matches the userID path param", func() { + 
testTokenUserIDMustMatchPathParam(ListAlerts, nil) + }) + + It("errors when no Config exists", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusNotFound)) + }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + {FollowedUserID: "foo", UserID: "bar"}, + } + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK), rec.Body.String()) + got := []*alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].UserID).To(Equal("bar")) + Expect(got[0].FollowedUserID).To(Equal("foo")) + } + }) + }) Describe("Get", func() { It("rejects unauthenticated users", func() { testAuthenticationRequired(GetAlert) @@ -132,14 +216,14 @@ var _ = Describe("Alerts endpoints", func() { testTokenUserIDMustMatchPathParam(GetAlert, nil) }) - It("errors when Config doesn't exist", func() { + It("errors when no Config exists", func() { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := newMockAlertsRepo() repo.ReturnsError(mongo.ErrNoDocuments) dCtx.MockAlertsRepository = repo @@ -151,21 +235,107 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(func(dCtx dataservice.Context) { - dCtx.Request().PathParams["userId"] = mocks.TestUserID2 + 
dCtx.Request().PathParams["userId"] = testFollowedUserID + GetAlert(dCtx) + }) + }) + + It("succeeds", func() { + t := GinkgoT() + url := fmt.Sprintf("/v1/users/%s/followers/%s/alerts", testFollowedUserID, testUserID) + dCtx := mocks.NewContext(t, "GET", url, nil) + repo := newMockAlertsRepo() + repo.GetAlertsResponses[testUserID+testFollowedUserID] = &alerts.Config{ + FollowedUserID: "foo", + UserID: "bar", + } + dCtx.MockAlertsRepository = repo + + GetAlert(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := &alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(got)).To(Succeed()) + Expect(got.UserID).To(Equal("bar")) + Expect(got.FollowedUserID).To(Equal("foo")) + }) + }) + + Describe("ListOverdueCommunications", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(ListOverdueCommunications) + }) + + It("succeeds, even when there are no users found", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + dCtx.MockLastCommunicationsRepository = newMockLastCommunicationsRepo() + ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + }) + + It("errors when the upstream repo errors", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + lastCommunicationsRepo := newMockLastCommunicationsRepo() + lastCommunicationsRepo.ListOverdueCommunicationsError = fmt.Errorf("test error") + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo + + ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusInternalServerError)) + }) + + It("succeeds when overdue communications are found", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + 
lastCommunicationsRepo := newMockLastCommunicationsRepo() + testTime := time.Unix(123, 456) + lastCommunicationsRepo.ListOverdueCommunicationsResponses = [][]alerts.LastCommunication{ + { + { + LastReceivedDeviceData: testTime, + }, + }, + } + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo + + ListOverdueCommunications(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := []alerts.LastCommunication{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].LastReceivedDeviceData).To(BeTemporally("==", testTime)) + } + }) }) }) type mockRepo struct { - UserID string - Error error + UserID string + Error error + AlertsForUserID map[string][]*alerts.Config + GetAlertsResponses map[string]*alerts.Config } -func newMockRepo() *mockRepo { - return &mockRepo{} +func newMockAlertsRepo() *mockRepo { + return &mockRepo{ + AlertsForUserID: map[string][]*alerts.Config{}, + GetAlertsResponses: map[string]*alerts.Config{}, + } } func (r *mockRepo) ReturnsError(err error) { @@ -189,6 +359,9 @@ func (r *mockRepo) Get(ctx context.Context, conf *alerts.Config) (*alerts.Config if conf != nil { r.UserID = conf.UserID } + if resp, found := r.GetAlertsResponses[conf.UserID+conf.FollowedUserID]; found { + return resp, nil + } return &alerts.Config{}, nil } @@ -202,6 +375,62 @@ func (r *mockRepo) Delete(ctx context.Context, conf *alerts.Config) error { return nil } +func (r *mockRepo) List(ctx context.Context, userID string) ([]*alerts.Config, error) { + if r.Error != nil { + return nil, r.Error + } + r.UserID = userID + alerts, ok := r.AlertsForUserID[userID] + if !ok { + return nil, nil + } + return alerts, nil +} + func (r *mockRepo) EnsureIndexes() error { return nil } + +type mockLastCommunicationsRepo struct { + ListOverdueCommunicationsResponses [][]alerts.LastCommunication + ListOverdueCommunicationsError error +} + +func newMockLastCommunicationsRepo() 
*mockLastCommunicationsRepo { + return &mockLastCommunicationsRepo{ + ListOverdueCommunicationsResponses: [][]alerts.LastCommunication{}, + } +} + +func (r *mockLastCommunicationsRepo) RecordReceivedDeviceData(_ context.Context, + _ alerts.LastCommunication) error { + + return nil +} + +func (r *mockLastCommunicationsRepo) OverdueCommunications(_ context.Context) ( + []alerts.LastCommunication, error) { + + if r.ListOverdueCommunicationsError != nil { + return nil, r.ListOverdueCommunicationsError + } + + if len(r.ListOverdueCommunicationsResponses) > 0 { + ret := r.ListOverdueCommunicationsResponses[0] + r.ListOverdueCommunicationsResponses = r.ListOverdueCommunicationsResponses[1:] + return ret, nil + } + return nil, nil +} + +func (r *mockLastCommunicationsRepo) EnsureIndexes() error { + return nil +} + +func testConfig() *alerts.Config { + return &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + } +} diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index d0ac5c33d2..1d1afb20cb 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -10,7 +10,8 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data/service/context" - "github.com/tidepool-org/platform/devicetokens" + log "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" servicecontext "github.com/tidepool-org/platform/service/context" @@ -23,18 +24,20 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. 
- authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockDeviceTokensRepository devicetokens.Repository - MockPermissionClient permission.Client + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client + MockLastCommunicationsRepository alerts.LastCommunicationsRepository } func NewContext(t likeT, method, url string, body io.Reader) *Context { details := DefaultAuthDetails() ctx := request.NewContextWithAuthDetails(stdcontext.Background(), details) + lgr := logtest.NewLogger() + ctx = log.NewContextWithLogger(ctx, lgr) r, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { t.Fatalf("error creating request: %s", err) @@ -98,10 +101,10 @@ func (c *Context) AlertsRepository() alerts.Repository { return c.MockAlertsRepository } -func (c *Context) DeviceTokensRepository() devicetokens.Repository { - return c.MockDeviceTokensRepository -} - func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } + +func (c *Context) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + return c.MockLastCommunicationsRepository +} diff --git a/data/service/api/v1/mocks/mocklogger_test_gen.go b/data/service/api/v1/mocks/mocklogger_test_gen.go index 81757d6525..65b949fcee 100644 --- a/data/service/api/v1/mocks/mocklogger_test_gen.go +++ b/data/service/api/v1/mocks/mocklogger_test_gen.go @@ -8,7 +8,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - log "github.com/tidepool-org/platform/log" ) diff --git a/data/service/api/v1/users_datasets_create_test.go b/data/service/api/v1/users_datasets_create_test.go index 26004f79fb..302a4f7474 100644 --- a/data/service/api/v1/users_datasets_create_test.go +++ 
b/data/service/api/v1/users_datasets_create_test.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + dataService "github.com/tidepool-org/platform/data/service" "github.com/tidepool-org/platform/data/summary/reporters" "github.com/tidepool-org/platform/clinics" @@ -84,6 +85,8 @@ type testingT interface { Fatalf(format string, args ...any) } +var _ dataService.Context = (*mockDataServiceContext)(nil) + type mockDataServiceContext struct { t testingT @@ -224,3 +227,7 @@ func (c *mockDataServiceContext) DataSourceClient() dataSource.Client { func (c *mockDataServiceContext) SummaryReporter() *reporters.PatientRealtimeDaysReporter { panic("not implemented") } + +func (c *mockDataServiceContext) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + panic("not implemented") +} diff --git a/data/service/context.go b/data/service/context.go index 7ea41f3005..5cb714a7f0 100644 --- a/data/service/context.go +++ b/data/service/context.go @@ -29,6 +29,7 @@ type Context interface { SummaryRepository() dataStore.SummaryRepository SyncTaskRepository() syncTaskStore.SyncTaskRepository AlertsRepository() alerts.Repository + LastCommunicationsRepository() alerts.LastCommunicationsRepository SummarizerRegistry() *summary.SummarizerRegistry SummaryReporter() *reporters.PatientRealtimeDaysReporter diff --git a/data/service/context/standard.go b/data/service/context/standard.go index 79d7a6c95d..f9c72b07d9 100644 --- a/data/service/context/standard.go +++ b/data/service/context/standard.go @@ -26,21 +26,22 @@ import ( type Standard struct { *serviceContext.Responder - authClient auth.Client - metricClient metric.Client - permissionClient permission.Client - dataDeduplicatorFactory deduplicator.Factory - dataStore dataStore.Store - dataRepository dataStore.DataRepository - summaryRepository dataStore.SummaryRepository - summarizerRegistry *summary.SummarizerRegistry - summaryReporter *reporters.PatientRealtimeDaysReporter - syncTaskStore syncTaskStore.Store - 
syncTasksRepository syncTaskStore.SyncTaskRepository - dataClient dataClient.Client - clinicsClient clinics.Client - dataSourceClient dataSource.Client - alertsRepository alerts.Repository + authClient auth.Client + metricClient metric.Client + permissionClient permission.Client + dataDeduplicatorFactory deduplicator.Factory + dataStore dataStore.Store + dataRepository dataStore.DataRepository + summaryRepository dataStore.SummaryRepository + summarizerRegistry *summary.SummarizerRegistry + summaryReporter *reporters.PatientRealtimeDaysReporter + syncTaskStore syncTaskStore.Store + syncTasksRepository syncTaskStore.SyncTaskRepository + dataClient dataClient.Client + clinicsClient clinics.Client + dataSourceClient dataSource.Client + alertsRepository alerts.Repository + lastCommunicationsRepository alerts.LastCommunicationsRepository } func WithContext(authClient auth.Client, metricClient metric.Client, permissionClient permission.Client, @@ -129,6 +130,9 @@ func (s *Standard) Close() { if s.alertsRepository != nil { s.alertsRepository = nil } + if s.lastCommunicationsRepository != nil { + s.lastCommunicationsRepository = nil + } } func (s *Standard) AuthClient() auth.Client { @@ -208,3 +212,10 @@ func (s *Standard) AlertsRepository() alerts.Repository { } return s.alertsRepository } + +func (s *Standard) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + if s.lastCommunicationsRepository == nil { + s.lastCommunicationsRepository = s.dataStore.NewLastCommunicationsRepository() + } + return s.lastCommunicationsRepository +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 80911f4b20..9cf4c98ec1 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,15 +2,17 @@ package service import ( "context" - "log" - "os" - - "github.com/tidepool-org/platform/clinics" + "strings" + "time" "github.com/IBM/sarama" + "github.com/kelseyhightower/envconfig" + eventsCommon 
"github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/application" + "github.com/tidepool-org/platform/clinics" dataDeduplicatorDeduplicator "github.com/tidepool-org/platform/data/deduplicator/deduplicator" dataDeduplicatorFactory "github.com/tidepool-org/platform/data/deduplicator/factory" dataEvents "github.com/tidepool-org/platform/data/events" @@ -22,11 +24,12 @@ import ( dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" - logInternal "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log" metricClient "github.com/tidepool-org/platform/metric/client" "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/service/server" "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" @@ -44,7 +47,9 @@ type Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client + pusher dataEvents.Pusher userEventsHandler events.Runner + alertsEventsHandler events.Runner api *api.Standard server *server.Standard } @@ -87,9 +92,18 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeDataSourceClient(); err != nil { return err } + if err := s.initializeSaramaLogger(); err != nil { + return err + } + if err := s.initializePusher(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } + if err := s.initializeAlertsEventsHandler(); err != nil { + return err + } if err := s.initializeAPI(); err != nil { return err } @@ -110,6 +124,13 @@ func (s *Standard) Terminate() { } s.userEventsHandler = nil } + if 
s.alertsEventsHandler != nil { + s.Logger().Info("Terminating the alertsEventsHandler") + if err := s.alertsEventsHandler.Terminate(); err != nil { + s.Logger().Errorf("Error while terminating the alertsEventsHandler: %v", err) + } + s.alertsEventsHandler = nil + } s.api = nil s.dataClient = nil if s.syncTaskStore != nil { @@ -140,6 +161,9 @@ func (s *Standard) Run() error { go func() { errs <- s.userEventsHandler.Run() }() + go func() { + errs <- s.alertsEventsHandler.Run() + }() go func() { errs <- s.server.Serve() }() @@ -406,9 +430,8 @@ func (s *Standard) initializeServer() error { func (s *Standard) initializeUserEventsHandler() error { s.Logger().Debug("Initializing user events handler") - sarama.Logger = log.New(os.Stdout, "SARAMA ", log.LstdFlags|log.Lshortfile) - ctx := logInternal.NewContextWithLogger(context.Background(), s.Logger()) + ctx := log.NewContextWithLogger(context.Background(), s.Logger()) handler := dataEvents.NewUserDataDeletionHandler(ctx, s.dataStore, s.dataSourceStructuredStore) handlers := []eventsCommon.EventHandler{handler} runner := events.NewRunner(handlers) @@ -419,3 +442,125 @@ func (s *Standard) initializeUserEventsHandler() error { return nil } + +func (s *Standard) initializeSaramaLogger() error { + // Multiple properties of Standard use the sarama package. This is + // intended to be the one place that the sarama Logger is initialized, + // before any of the properties that need it are run. 
+ sarama.Logger = log.NewSarama(s.Logger()) + return nil +} + +func (s *Standard) initializePusher() error { + var err error + + apns2Config := &struct { + SigningKey []byte `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_SIGNING_KEY"` + KeyID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_KEY_ID"` + BundleID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_BUNDLE_ID"` + TeamID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_TEAM_ID"` + }{} + if err := envconfig.Process("", apns2Config); err != nil { + return errors.Wrap(err, "Unable to process APNs pusher config") + } + + var pusher dataEvents.Pusher + pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, + apns2Config.TeamID, apns2Config.BundleID) + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} + +func (s *Standard) initializeAlertsEventsHandler() error { + s.Logger().Debug("Initializing alerts events handler") + + commonConfig := eventsCommon.NewConfig() + if err := commonConfig.LoadFromEnv(); err != nil { + return err + } + + topics := []string{"data.alerts", "data.deviceData.alerts"} + // Some kafka topics use a `-` as a prefix. But MongoDB CDC topics are created with + // `.`. This code is using CDC topics, so ensuring that a `.` is used for alerts events + // lines everything up as expected. 
+ topicPrefix := strings.ReplaceAll(commonConfig.KafkaTopicPrefix, "-", ".") + prefixedTopics := make([]string, 0, len(topics)) + for _, topic := range topics { + prefixedTopics = append(prefixedTopics, topicPrefix+topic) + } + + alertsRepo := s.dataStore.NewAlertsRepository() + dataRepo := s.dataStore.NewAlertsDataRepository() + lastCommunicationsRepo := s.dataStore.NewLastCommunicationsRepository() + + alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.PermissionClient(), + s.Logger(), s.AuthClient()) + + ec := &dataEvents.Consumer{ + Alerts: alertsRepo, + Evaluator: alertsEvaluator, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Logger: s.Logger(), + Permissions: s.PermissionClient(), + Pusher: s.pusher, + LastCommunications: dataEvents.NewLastCommunicationRecorder(lastCommunicationsRepo), + TokensProvider: s.AuthClient(), + } + + runnerCfg := dataEvents.SaramaRunnerConfig{ + Brokers: commonConfig.KafkaBrokers, + GroupID: "alerts", + Topics: prefixedTopics, + Sarama: commonConfig.SaramaConfig, + MessageConsumer: &dataEvents.AlertsEventsConsumer{ + Consumer: ec, + }, + } + + cfg := &alertsEventsHandlerConfig{Config: platform.NewConfig()} + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("alerts", "retry") + loader := platform.NewConfigReporterLoader(reporter) + if err := cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to load alerts retry delays config") + } + delays, err := parseCommaSeparatedDurations(reporter.GetWithDefault("delays", "1s")) + if err != nil { + return errors.Wrap(err, "Unable to read configured alerts retry delays") + } + + eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), delays) + runner := dataEvents.NewSaramaRunner(eventsRunner) + if err := runner.Initialize(); err != nil { + return errors.Wrap(err, "Unable to initialize alerts events handler runner") + } + s.alertsEventsHandler = runner + + return nil +} + +type alertsEventsHandlerConfig struct { + 
*platform.Config + RetryDelaysConfig string `envconfig:"TIDEPOOL_DATA_SERVICE_ALERTS_RETRY_DELAYS" default:"1s"` +} + +func parseCommaSeparatedDurations(s string) ([]time.Duration, error) { + out := []time.Duration{} + for _, d := range strings.Split(s, ",") { + if d == "" { + continue + } + dur, err := time.ParseDuration(d) + if err != nil { + return nil, err + } + out = append(out, dur) + } + return out, nil +} diff --git a/data/store/mongo/mongo.go b/data/store/mongo/mongo.go index 8ebfa97239..c4f2d553cf 100644 --- a/data/store/mongo/mongo.go +++ b/data/store/mongo/mongo.go @@ -29,6 +29,7 @@ func (s *Store) EnsureIndexes() error { dataRepository := s.NewDataRepository() summaryRepository := s.NewSummaryRepository() alertsRepository := s.NewAlertsRepository() + lastCommunicationsRepository := s.NewLastCommunicationsRepository() if err := dataRepository.EnsureIndexes(); err != nil { return err @@ -42,6 +43,10 @@ func (s *Store) EnsureIndexes() error { return err } + if err := lastCommunicationsRepository.EnsureIndexes(); err != nil { + return err + } + return nil } @@ -66,3 +71,13 @@ func (s *Store) NewAlertsRepository() alerts.Repository { r := alertsRepo(*s.Store.GetRepository("alerts")) return &r } + +func (s *Store) NewLastCommunicationsRepository() alerts.LastCommunicationsRepository { + r := lastCommunicationsRepo(*s.Store.GetRepository("lastCommunications")) + return &r +} + +func (s *Store) NewAlertsDataRepository() alerts.DataRepository { + r := alertsDataRepo(*s.Store.GetRepository("deviceData")) + return &r +} diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index ee313f3ffb..5887f8f834 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -3,12 +3,17 @@ package mongo import ( "context" "fmt" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" + 
"github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -16,9 +21,21 @@ import ( type alertsRepo structuredmongo.Repository // Upsert will create or update the given Config. +// +// Once set, UploadID, UserID, and FollowedUserID cannot be changed. This is to prevent a +// user from granting themselves access to another data set. func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { opts := options.Update().SetUpsert(true) - _, err := r.UpdateOne(ctx, r.filter(conf), bson.M{"$set": conf}, opts) + filter := bson.D{ + {Key: "userId", Value: conf.UserID}, + {Key: "followedUserId", Value: conf.FollowedUserID}, + {Key: "uploadId", Value: conf.UploadID}, + } + doc := bson.M{ + "$set": bson.M{"alerts": conf.Alerts, "activity": conf.Activity}, + "$setOnInsert": filter, + } + _, err := r.UpdateOne(ctx, filter, doc, opts) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } @@ -34,6 +51,26 @@ func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { return nil } +// List will retrieve any Configs that are defined by followers of the given user. 
+func (r *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { + filter := bson.D{ + {Key: "followedUserId", Value: followedUserID}, + } + cursor, err := r.Find(ctx, filter, nil) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list alerts.Config(s) for followed user %s", followedUserID) + } + defer cursor.Close(ctx) + out := []*alerts.Config{} + if err := cursor.All(ctx, &out); err != nil { + return nil, errors.Wrapf(err, "Unable to decode alerts.Config(s) for followed user %s", followedUserID) + } + if err := cursor.Err(); err != nil { + return nil, errors.Wrapf(err, "Unexpected error for followed user %s", followedUserID) + } + return out, nil +} + // Get will retrieve the given Config. func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { res := r.FindOne(ctx, r.filter(cfg), nil) @@ -64,8 +101,60 @@ func (r *alertsRepo) EnsureIndexes() error { } func (r *alertsRepo) filter(cfg *alerts.Config) interface{} { - return &alerts.Config{ - UserID: cfg.UserID, - FollowedUserID: cfg.FollowedUserID, + return bson.D{ + {Key: "userId", Value: cfg.UserID}, + {Key: "followedUserId", Value: cfg.FollowedUserID}, + } +} + +type alertsDataRepo structuredmongo.Repository + +func (d *alertsDataRepo) GetAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { + + if params.End.IsZero() { + params.End = time.Now() + } + + cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) + if err != nil { + return nil, err + } + dosingDecisions := []*dosingdecision.DosingDecision{} + if err := cursor.All(ctx, &dosingDecisions); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable dosing documents") + } + cursor, err = d.getAlertableData(ctx, params, continuous.Type) + if err != nil { + return nil, err + } + glucoseData := []*glucose.Glucose{} + if err := cursor.All(ctx, &glucoseData); err != nil { + return nil, 
errors.Wrap(err, "Unable to load alertable glucose documents") + } + response := &alerts.GetAlertableDataResponse{ + DosingDecisions: dosingDecisions, + Glucose: glucoseData, + } + + return response, nil +} + +func (d *alertsDataRepo) getAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams, typ string) (*mongo.Cursor, error) { + + selector := bson.M{ + "_active": true, + "uploadId": params.UploadID, + "type": typ, + "_userId": params.UserID, + "time": bson.M{"$gte": params.Start, "$lte": params.End}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) + cursor, err := d.Find(ctx, selector, findOptions) + if err != nil { + format := "Unable to find alertable %s data in dataset %s" + return nil, errors.Wrapf(err, format, typ, params.UploadID) } + return cursor, nil } diff --git a/data/store/mongo/mongo_data.go b/data/store/mongo/mongo_data.go index 4076a968af..c827999bc9 100644 --- a/data/store/mongo/mongo_data.go +++ b/data/store/mongo/mongo_data.go @@ -195,10 +195,6 @@ func (d *DataRepository) DestroyDataForUserByID(ctx context.Context, userID stri return nil } -func (d *DataRepository) mongoClient() *mongo.Client { - return d.DatumRepository.Database().Client() -} - func isTypeUpload(typ []string) bool { return slices.Contains(typ, strings.ToLower(upload.Type)) } diff --git a/data/store/mongo/mongo_datum.go b/data/store/mongo/mongo_datum.go index 977c6f813b..159c7ec93f 100644 --- a/data/store/mongo/mongo_datum.go +++ b/data/store/mongo/mongo_datum.go @@ -11,14 +11,9 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/summary/types" baseDatum "github.com/tidepool-org/platform/data/types" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" - "github.com/tidepool-org/platform/data/types/dosingdecision" 
"github.com/tidepool-org/platform/data/types/upload" - platerrors "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" structureValidator "github.com/tidepool-org/platform/structure/validator" @@ -646,56 +641,6 @@ func (d *DatumRepository) GetDataRange(ctx context.Context, userId string, typ [ return cursor, nil } -func (d *DatumRepository) GetAlertableData(ctx context.Context, - params store.AlertableParams) (*store.AlertableResponse, error) { - - if params.End.IsZero() { - params.End = time.Now() - } - - cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) - if err != nil { - return nil, err - } - dosingDecisions := []*dosingdecision.DosingDecision{} - if err := cursor.All(ctx, &dosingDecisions); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable dosing documents") - } - cursor, err = d.getAlertableData(ctx, params, continuous.Type) - if err != nil { - return nil, err - } - glucoseData := []*glucose.Glucose{} - if err := cursor.All(ctx, &glucoseData); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable glucose documents") - } - response := &store.AlertableResponse{ - DosingDecisions: dosingDecisions, - Glucose: glucoseData, - } - - return response, nil -} - -func (d *DatumRepository) getAlertableData(ctx context.Context, - params store.AlertableParams, typ string) (*mongo.Cursor, error) { - - selector := bson.M{ - "_active": true, - "uploadId": params.UploadID, - "type": typ, - "_userId": params.UserID, - "time": bson.M{"$gte": params.Start, "$lte": params.End}, - } - findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) - cursor, err := d.Find(ctx, selector, findOptions) - if err != nil { - format := "Unable to find alertable %s data in dataset %s" - return nil, platerrors.Wrapf(err, format, typ, params.UploadID) - } - return cursor, nil -} - func (d *DatumRepository) 
getTimeRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (err error) { timestamp := time.Now().UTC() futureCutoff := timestamp.AddDate(0, 0, 1) diff --git a/data/store/mongo/mongo_recorder.go b/data/store/mongo/mongo_recorder.go new file mode 100644 index 0000000000..a24b0b1fb0 --- /dev/null +++ b/data/store/mongo/mongo_recorder.go @@ -0,0 +1,75 @@ +package mongo + +import ( + "context" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" +) + +// lastCommunicationsRepo implements LastCommunicationsRepository, writing data to a +// MongoDB collection. +type lastCommunicationsRepo structuredmongo.Repository + +func (r *lastCommunicationsRepo) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + opts := options.Update().SetUpsert(true) + _, err := r.UpdateOne(ctx, r.filter(lastComm), bson.M{"$set": lastComm}, opts) + if err != nil { + return fmt.Errorf("upserting alerts.LastCommunication: %w", err) + } + return nil +} + +func (r *lastCommunicationsRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*r) + return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "lastReceivedDeviceData", Value: 1}, + }, + Options: options.Index(). + SetName("LastReceivedDeviceData"), + }, + { + Keys: bson.D{ + {Key: "dataSetId", Value: 1}, + }, + Options: options.Index(). + SetUnique(true). 
+ SetName("DataSetIdUnique"), + }, + }) +} + +func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[string]any { + return map[string]any{ + "userId": lastComm.UserID, + "dataSetId": lastComm.DataSetID, + } +} + +func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ([]alerts.LastCommunication, error) { + start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) + selector := bson.M{ + "lastReceivedDeviceData": bson.M{"$lte": start}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) + cursor, err := d.Find(ctx, selector, findOptions) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list overdue records") + } + records := []alerts.LastCommunication{} + if err := cursor.All(ctx, &records); err != nil { + return nil, errors.Wrapf(err, "Unable to iterate overdue records") + } + return records, nil +} diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index 7b0a0fd2a4..b4f7b452fe 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -2,6 +2,7 @@ package mongo_test import ( "context" + "encoding/json" "fmt" "math/rand" "sync" @@ -17,6 +18,7 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" + "github.com/tidepool-org/platform/data/service/api/v1/mocks" dataStore "github.com/tidepool-org/platform/data/store" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" dataTest "github.com/tidepool-org/platform/data/test" @@ -238,8 +240,10 @@ func DataSetDatumAsInterface(dataSetDatum data.Datum) interface{} { var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var repository dataStore.DataRepository + var alertsDataRepository alerts.DataRepository var summaryRepository dataStore.SummaryRepository var alertsRepository alerts.Repository + var lastCommunicationsRepository alerts.LastCommunicationsRepository var logger = logTest.NewLogger() var 
store *dataStoreMongo.Store @@ -266,6 +270,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var dataSetCollection *mongo.Collection var summaryCollection *mongo.Collection var alertsCollection *mongo.Collection + var recordsCollection *mongo.Collection var collectionsOnce sync.Once BeforeEach(func() { @@ -274,6 +279,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { dataSetCollection = store.GetCollection("deviceDataSets") summaryCollection = store.GetCollection("summary") alertsCollection = store.GetCollection("alerts") + recordsCollection = store.GetCollection("lastCommunications") }) }) @@ -289,6 +295,8 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(err).To(Succeed()) _, err = alertsCollection.DeleteMany(ctx, all) Expect(err).To(Succeed()) + _, err = recordsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) }) Context("EnsureIndexes", func() { @@ -445,14 +453,24 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { }) }) + Context("NewLastCommunicationsRepository", func() { + It("returns a new repository", func() { + lastCommunicationsRepository = store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) + }) + }) + Context("with a new repository", func() { BeforeEach(func() { repository = store.NewDataRepository() summaryRepository = store.NewSummaryRepository() alertsRepository = store.NewAlertsRepository() + alertsDataRepository = store.NewAlertsDataRepository() + lastCommunicationsRepository = store.NewLastCommunicationsRepository() Expect(repository).ToNot(BeNil()) Expect(summaryRepository).ToNot(BeNil()) Expect(alertsRepository).ToNot(BeNil()) + Expect(alertsDataRepository).ToNot(BeNil()) }) Context("with persisted data sets", func() { @@ -2409,13 +2427,15 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(repository.CreateDataSet(ctx, 
testSet)).To(Succeed()) testSetData := testDataSetData(testSet) Expect(repository.CreateDataSetData(ctx, testSet, testSetData)).To(Succeed()) + alertsDataRepository = store.NewAlertsDataRepository() + Expect(alertsDataRepository).ToNot(BeNil()) - params := dataStore.AlertableParams{ + params := alerts.GetAlertableDataParams{ Start: time.Now().Add(-time.Hour), UserID: testUserID, UploadID: *testSet.UploadID, } - resp, err := repository.GetAlertableData(ctx, params) + resp, err := alertsDataRepository.GetAlertableData(ctx, params) Expect(err).To(Succeed()) Expect(resp).ToNot(BeNil()) @@ -2427,6 +2447,11 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Context("alerts", func() { BeforeEach(func() { + var err error + ctx := context.Background() + all := bson.D{} + _, err = alertsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) alertsRepository = store.NewAlertsRepository() Expect(alertsRepository).ToNot(BeNil()) }) @@ -2439,8 +2464,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { ctx := context.Background() filter := bson.M{} if upsertDoc { - Expect(alertsRepository.Upsert(ctx, cfg)). 
- To(Succeed()) + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) filter["userId"] = cfg.UserID filter["followedUserId"] = cfg.FollowedUserID } @@ -2463,7 +2487,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { It("updates the existing document", func() { ctx, cfg, filter := prep(true) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) @@ -2471,10 +2495,72 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { res := store.GetCollection("alerts").FindOne(ctx, filter) Expect(res.Err()).To(Succeed()) Expect(res.Decode(doc)).To(Succeed()) - Expect(doc.Low).ToNot(BeNil()) - Expect(doc.Low.Base.Enabled).To(Equal(true)) + jsonOut, _ := json.Marshal(doc) + Expect(doc.Alerts.Low).ToNot(BeNil(), string(jsonOut)) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) }) + It("sets userId, followedUserId, and uploadId only on creation", func() { + ctx, cfg, filter := prep(false) + cfg.UploadID = "something" + + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + doc := &alerts.Config{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UploadID).To(Equal("something")) + + testDelay := 42 * time.Minute + doc.Alerts.Low = &alerts.LowAlert{} + doc.Alerts.Low.Delay = alerts.DurationMinutes(testDelay) + doc.UploadID = "something else" + doc.UserID = "new junk" + doc.FollowedUserID = "this shouldn't be" + + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + res = store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UploadID).To(Equal("something")) + 
Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.Alerts.Low.Delay.Duration()).To(Equal(testDelay)) + }) + + It("updates the Config's Activity", func() { + ctx, cfg, filter := prep(true) + testSent := time.Now().Add(-3 * time.Minute) + testTriggered := time.Now().Add(-5 * time.Minute) + cfg.Alerts.Low = &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true, + // Activity: alerts.AlertActivity{ + // Triggered: testTriggered, + // Sent: testSent, + // // Resolved is unset, so it should be a zero value. + // }, + }, + } + cfg.Activity.Low.Sent = testSent + cfg.Activity.Low.Triggered = testTriggered + + err := alertsRepository.Upsert(ctx, cfg) + Expect(err).To(Succeed()) + + doc := &alerts.Config{} + //raw := map[string]any{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.Alerts.Low).ToNot(BeNil()) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) + Expect(doc.Activity.Low.Triggered).To(BeTemporally("~", testTriggered, time.Millisecond)) + Expect(doc.Activity.Low.Sent).To(BeTemporally("~", testSent, time.Millisecond)) + Expect(doc.Activity.Low.Resolved).To(Equal(time.Time{})) + }) }) Describe("Get", func() { @@ -2493,20 +2579,23 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { UserID: "879d5cb2-f70d-4b05-8d38-fb6d88ef2ea9", FollowedUserID: "d2ee01db-3458-42ac-95d2-ac2fc571a21d", Alerts: alerts.Alerts{ + // DataAlerts: alerts.DataAlerts{ High: &alerts.HighAlert{ Base: alerts.Base{Enabled: true}, }, - }} + // }, + }, + } Expect(alertsRepository.Upsert(ctx, other)).To(Succeed()) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) got, err := alertsRepository.Get(ctx, cfg) Expect(err).To(Succeed()) Expect(got).ToNot(BeNil()) - 
Expect(got.Low).ToNot(BeNil()) - Expect(got.Low.Enabled).To(Equal(true)) + Expect(got.Alerts.Low).ToNot(BeNil()) + Expect(got.Alerts.Low.Enabled).To(Equal(true)) Expect(got.UserID).To(Equal(cfg.UserID)) Expect(got.FollowedUserID).To(Equal(cfg.FollowedUserID)) }) @@ -2523,6 +2612,70 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(res.Err()).To(MatchError(mongo.ErrNoDocuments)) }) }) + + Describe("List", func() { + It("lists only matching configs", func() { + ctx, cfg, _ := prep(true) + cfg2 := &alerts.Config{ + FollowedUserID: "followed-user-id-2", + UserID: "user-id", + } + Expect(alertsRepository.Upsert(ctx, cfg2)).To(Succeed()) + cfg3 := &alerts.Config{ + FollowedUserID: "followed-user-id", + UserID: "user-id-2", + } + Expect(alertsRepository.Upsert(ctx, cfg3)).To(Succeed()) + + got, err := alertsRepository.List(ctx, cfg.FollowedUserID) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(2)) + }) + }) + }) + + Context("LastCommunicationsRecorder", func() { + BeforeEach(func() { + lastCommunicationsRepository = store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) + }) + + Describe("OverdueCommunications", func() { + It("retrieves matching records", func() { + ctx := context.Background() + got, err := lastCommunicationsRepository.OverdueCommunications(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(0)) + }) + + It("retrieves matching records2", func() { + ctx := context.Background() + testLastComm := alerts.LastCommunication{ + UserID: testUserID, + DataSetID: testDataSetID, + LastReceivedDeviceData: time.Unix(123, 456), + } + Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm)).To(Succeed()) + testLastComm2 := alerts.LastCommunication{ + UserID: testUserID + "2", + DataSetID: testDataSetID + "2", + LastReceivedDeviceData: time.Now(), + } + Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm2)).To(Succeed()) + + got, 
err := lastCommunicationsRepository.OverdueCommunications(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(1)) + }) + + It("is true", func() { + Expect(true).To(BeTrue()) + }) + }) }) }) }) + +var testUserID = mocks.TestUserID1 + +const testDataSetID = "blah" diff --git a/data/store/store.go b/data/store/store.go index 7410d76c88..2ea9c9452d 100644 --- a/data/store/store.go +++ b/data/store/store.go @@ -7,9 +7,6 @@ import ( "go.mongodb.org/mongo-driver/mongo" "github.com/tidepool-org/platform/alerts" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/dosingdecision" - "github.com/tidepool-org/platform/data" "github.com/tidepool-org/platform/data/types/upload" "github.com/tidepool-org/platform/page" @@ -23,6 +20,7 @@ type Store interface { NewDataRepository() DataRepository NewSummaryRepository() SummaryRepository NewAlertsRepository() alerts.Repository + NewLastCommunicationsRepository() alerts.LastCommunicationsRepository } // DataSetRepository is the interface for interacting and modifying @@ -65,9 +63,6 @@ type DatumRepository interface { GetDataRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (*mongo.Cursor, error) GetLastUpdatedForUser(ctx context.Context, userId string, typ []string, lastUpdated time.Time) (*data.UserDataStatus, error) DistinctUserIDs(ctx context.Context, typ []string) ([]string, error) - - // GetAlertableData queries for the data used to evaluate alerts configurations. - GetAlertableData(ctx context.Context, params AlertableParams) (*AlertableResponse, error) } // DataRepository is the combined interface of DataSetRepository and @@ -98,19 +93,3 @@ type SummaryRepository interface { GetStore() *storeStructuredMongo.Repository } - -type AlertableParams struct { - // UserID of the user that owns the data. - UserID string - // UploadID of the device data set to query. 
- UploadID string - // Start limits the data to those recorded after this time. - Start time.Time - // End limits the data to those recorded before this time. - End time.Time -} - -type AlertableResponse struct { - Glucose []*glucose.Glucose - DosingDecisions []*dosingdecision.DosingDecision -} diff --git a/data/store/test/data_repository.go b/data/store/test/data_repository.go index ff5a857bcd..e1dd86df33 100644 --- a/data/store/test/data_repository.go +++ b/data/store/test/data_repository.go @@ -8,6 +8,7 @@ import ( "github.com/onsi/gomega" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" dataStore "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/types/upload" @@ -182,11 +183,11 @@ type DistinctUserIDsOutput struct { type GetAlertableDataInput struct { Context context.Context - Params dataStore.AlertableParams + Params alerts.GetAlertableDataParams } type GetAlertableDataOutput struct { - Response *dataStore.AlertableResponse + Response *alerts.GetAlertableDataResponse Error error } @@ -528,7 +529,7 @@ func (d *DataRepository) DistinctUserIDs(ctx context.Context, typ []string) ([]s return output.UserIDs, output.Error } -func (d *DataRepository) GetAlertableData(ctx context.Context, params dataStore.AlertableParams) (*dataStore.AlertableResponse, error) { +func (d *DataRepository) GetAlertableData(ctx context.Context, params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { d.GetAlertableDataInvocations++ d.GetAlertableDataInputs = append(d.GetAlertableDataInputs, GetAlertableDataInput{Context: ctx, Params: params}) diff --git a/devicetokens/devicetokens.go b/devicetokens/devicetokens.go index a8fb790a3d..fc901187f7 100644 --- a/devicetokens/devicetokens.go +++ b/devicetokens/devicetokens.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "github.com/tidepool-org/platform/structure" @@ -52,6 +53,14 @@ type DeviceToken struct { 
Apple *AppleDeviceToken `json:"apple,omitempty" bson:"apple,omitempty"` } +func (t DeviceToken) String() string { + b, err := json.Marshal(t) + if err != nil { + return "" + } + return string(b) +} + // key provides a unique string value to identify this device token. // // Intended to be used as part of a unique index for database indexes. @@ -100,6 +109,7 @@ type AppleBlob []byte // Repository abstracts persistent storage for Token data. type Repository interface { + GetAllByUserID(ctx context.Context, userID string) ([]*Document, error) Upsert(ctx context.Context, doc *Document) error EnsureIndexes() error diff --git a/dexcom/fetch/test/mock.go b/dexcom/fetch/test/mock.go index 1ea79b2471..65e6c878a1 100644 --- a/dexcom/fetch/test/mock.go +++ b/dexcom/fetch/test/mock.go @@ -10,7 +10,6 @@ import ( time "time" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" data "github.com/tidepool-org/platform/data" source "github.com/tidepool-org/platform/data/source" diff --git a/go.mod b/go.mod index 1ada6b1ad3..759f978210 100644 --- a/go.mod +++ b/go.mod @@ -21,18 +21,19 @@ require ( github.com/onsi/gomega v1.33.1 github.com/prometheus/client_golang v1.19.1 github.com/rinchsan/device-check-go v1.3.0 + github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 - github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c + github.com/tidepool-org/go-common v0.12.2 github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace github.com/urfave/cli v1.22.15 go.mongodb.org/mongo-driver v1.16.0 go.uber.org/fx v1.22.1 - golang.org/x/crypto v0.24.0 + golang.org/x/crypto v0.32.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/oauth2 v0.21.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.10.0 golang.org/x/tools v0.22.0 
gonum.org/v1/gonum v0.15.0 google.golang.org/grpc v1.65.0 @@ -98,10 +99,10 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/protobuf v1.34.2 // indirect diff --git a/go.sum b/go.sum index 389228ddfe..44733686bc 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/ant0ine/go-json-rest v3.3.2+incompatible h1:nBixrkLFiDNAW0hauKDLc8yJI6XfrQumWvytE1Hk14E= github.com/ant0ine/go-json-rest v3.3.2+incompatible/go.mod h1:q6aCt0GfU6LhpBsnZ/2U+mwe+0XB5WStbmwyoPfc+sk= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= @@ -53,6 +55,7 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -162,6 +165,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sideshow/apns2 v0.23.0 h1:lpkikaZ995GIcKk6AFsYzHyezCrsrfEDvUWcWkEGErY= +github.com/sideshow/apns2 v0.23.0/go.mod h1:7Fceu+sL0XscxrfLSkAoH6UtvKefq3Kq1n4W3ayQZqE= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -180,8 +185,8 @@ github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 h1:fTIg github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2/go.mod h1:7BpAdFdGJNB3aw/xvCz5XnWjSWRoUtWIX4xcMc4Bsko= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 h1:1kiZtHhs++yXayRD/Mh/3POLwtmxV99YR2bSCle1Q74= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5/go.mod h1:xuQ8k0mLR1ZyEmwe/m0v2BuXctqQuCZeR43urSQpTUM= -github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c h1:hJZyiHNGeqyLA/5p60/0H9CZtJi4fAuzOuyQF0TpF7E= -github.com/tidepool-org/go-common 
v0.12.2-0.20240612192926-de6d5c5a742c/go.mod h1:mIzYteUyPf//fhee4e2KEZhmcm2iE4IQ/2dyQr5pRKA= +github.com/tidepool-org/go-common v0.12.2 h1:3mse3wJtq5irbgdCz3LeEfs8XE9oDX9kzDcHuWNW/jw= +github.com/tidepool-org/go-common v0.12.2/go.mod h1:BeqsQcDwfSsmnmc+/N/EOT8h3m8/YtqrLNykk5kGkv4= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace h1:L0UiCj2eL/NOpLa19Tf5IgoK6feILmdA+zK3nCTIhqU= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace/go.mod h1:gon+x+jAh8DZZ2hD23fBWqrYwOizVSwIBbxEsuXCbZ4= github.com/ugorji/go v1.2.4/go.mod h1:EuaSCk8iZMdIspsu6HXH7X2UGKw1ezO4wCfGszGmmo4= @@ -218,12 +223,13 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20170512130425-ab89591268e0/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod 
h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= @@ -238,18 +244,19 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync 
v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -259,25 +266,26 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -300,6 +308,7 @@ google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/log/devlog/devlog.go b/log/devlog/devlog.go index 2800cfd2a0..f1dc98193b 100644 --- a/log/devlog/devlog.go +++ b/log/devlog/devlog.go @@ -15,6 +15,7 @@ import ( "fmt" "io" stdlog "log" + "os" "sort" "strings" "time" @@ -83,7 +84,20 @@ func (s *serializer) Serialize(fields log.Fields) error { if len(pairs) > 0 { rest = ": " + strings.Join(pairs, " ") } - s.Logger.Printf(msgTime + " " + msgLevel + " " + msg + rest) + prefixes := []string{} + prefixes = append(prefixes, msgTime) + // HOSTNAME is set on Kubernetes pods and is useful for distinguishing logs from an + // outgoing Pod vs a newly created Pod. + if h := os.Getenv("HOSTNAME"); h != "" { + pieces := strings.Split(h, "-") + if len(pieces) > 0 { + prefixes = append(prefixes, pieces[len(pieces)-1]) + } else { + prefixes = append(prefixes, h) + } + } + prefixes = append(prefixes, msgLevel) + s.Logger.Print(strings.Join(prefixes, " ") + " " + msg + rest) return nil } diff --git a/log/sarama.go b/log/sarama.go new file mode 100644 index 0000000000..d09576c5a6 --- /dev/null +++ b/log/sarama.go @@ -0,0 +1,35 @@ +package log + +import ( + "fmt" + "strings" + + "github.com/IBM/sarama" +) + +// NewSarama returns a [Logger] adapted to implement [sarama.StdLogger]. +func NewSarama(l Logger) sarama.StdLogger { + return &SaramaLogger{Logger: l.WithField("SARAMA", "1")} +} + +// SaramaLogger wraps a [Logger] to implement [sarama.StdLogger]. +// +// Sarama doesn't support the concept of logging levels, so all messages will +// use the info level. 
+type SaramaLogger struct { + Logger +} + +func (l *SaramaLogger) Print(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} + +func (l *SaramaLogger) Printf(format string, args ...interface{}) { + // Sarama log messages sent via this method include a newline, which + // doesn't fit with Logger's style, so remove it. + l.Logger.Infof(strings.TrimSuffix(format, "\n"), args...) +} + +func (l *SaramaLogger) Println(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} diff --git a/log/sarama_test.go b/log/sarama_test.go new file mode 100644 index 0000000000..6fccd5e256 --- /dev/null +++ b/log/sarama_test.go @@ -0,0 +1,47 @@ +package log_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewSarama", func() { + It("initializes a new sarama log adapter", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + }) + + It("implements Print", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Print("testing 1 2 3") + + testLog.AssertInfo("testing 1 2 3") + }) + + It("implements Printf", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Printf("testing %s", "4 5 6") + + testLog.AssertInfo("testing 4 5 6") + }) + + It("implements Println", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Println("testing 7 8 9") + + testLog.AssertInfo("testing 7 8 9") + }) +}) diff --git a/push/logpush.go b/push/logpush.go new file mode 100644 index 0000000000..41e772260d --- /dev/null +++ b/push/logpush.go @@ -0,0 +1,41 @@ +package push + +import ( + "context" + "os" + + "github.com/tidepool-org/platform/devicetokens" + 
"github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" +) + +// LogPusher logs notifications instead of sending push notifications. +// +// Useful for dev or testing situations. +type LogPusher struct { + log.Logger +} + +// NewLogPusher uses a [log.Logger] instead of pushing via APNs. +func NewLogPusher(l log.Logger) *LogPusher { + if l == nil { + var err error + l, err = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + l = lognull.NewLogger() + } + } + return &LogPusher{Logger: l} +} + +// Push implements [service.Pusher]. +func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + + p.Logger.WithFields(log.Fields{ + "deviceToken": deviceToken, + "notification": notification, + }).Info("logging push notification") + return nil +} diff --git a/push/logpush_test.go b/push/logpush_test.go new file mode 100644 index 0000000000..f8f8611237 --- /dev/null +++ b/push/logpush_test.go @@ -0,0 +1,50 @@ +package push + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewLogPusher", func() { + It("succeeds", func() { + testLog := logtest.NewLogger() + + Expect(NewLogPusher(testLog)).ToNot(Equal(nil)) + }) + + It("implements Push by logging a message", func() { + testLog := logtest.NewLogger() + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(testLog) + Expect(pusher).ToNot(Equal(nil)) + + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + testFields := log.Fields{ + "deviceToken": testToken, + "notification": testNotification, + } + testLog.AssertInfo("logging push notification", testFields) + }) + + It("handles being passed a nil logger", func() { + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(nil) + Expect(pusher).ToNot(Equal(nil)) + + Expect(func() { + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + }).ToNot(Panic()) + }) +}) diff --git a/push/push.go b/push/push.go new file mode 100644 index 0000000000..d865e4e3ea --- /dev/null +++ b/push/push.go @@ -0,0 +1,157 @@ +// Package push provides clients for sending mobile device push notifications. +package push + +import ( + "context" + "encoding/hex" + "net/http" + "sync" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/payload" + "github.com/sideshow/apns2/token" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" +) + +// Notification models a provider-independent push notification. +type Notification struct { + Message string +} + +// String implements fmt.Stringer. 
+func (n Notification) String() string { + return n.Message +} + +// APNSPusher implements push notifications via Apple APNs. +type APNSPusher struct { + BundleID string + + client APNS2Client + clientMu sync.Mutex +} + +// NewAPNSPusher creates a Pusher for sending device notifications via Apple's +// APNs. +func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { + return &APNSPusher{ + BundleID: bundleID, + client: client, + } +} + +// NewAPNSPusherFromKeyData creates an APNSPusher for sending device +// notifications via Apple's APNs. +// +// The signingKey is the raw token signing key received from Apple (.p8 file +// containing PEM-encoded private key), along with its respective team id, key +// id, and application bundle id. +// +// https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns +func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { + if len(signingKey) == 0 { + return nil, errors.New("Unable to build APNSPusher: APNs signing key is blank") + } + + if bundleID == "" { + return nil, errors.New("Unable to build APNSPusher: bundleID is blank") + } + + if keyID == "" { + return nil, errors.New("Unable to build APNSPusher: keyID is blank") + } + + if teamID == "" { + return nil, errors.New("Unable to build APNSPusher: teamID is blank") + } + + authKey, err := token.AuthKeyFromBytes(signingKey) + if err != nil { + return nil, err + } + token := &token.Token{ + AuthKey: authKey, + KeyID: keyID, + TeamID: teamID, + } + client := &apns2Client{Client: apns2.NewTokenClient(token)} + return NewAPNSPusher(client, bundleID), nil +} + +func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + + if deviceToken.Apple == nil { + return errors.New("Unable to push notification: APNSPusher can only use Apple device tokens but the Apple token is nil") + } + + hexToken := 
hex.EncodeToString(deviceToken.Apple.Token) + appleNotification := p.buildAppleNotification(hexToken, notification) + resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNotification) + if err != nil { + return errors.Wrap(err, "Unable to push notification") + } + if resp.StatusCode != http.StatusOK { + return errors.Newf("Unable to push notification: APNs returned non-200 status: %d, %s", resp.StatusCode, resp.Reason) + } + if logger := log.LoggerFromContext(ctx); logger != nil { + logger.WithFields(log.Fields{ + "apnsID": resp.ApnsID, + }).Info("notification pushed") + } + + return nil +} + +// safePush guards the environment setup and push method with a mutex. +// +// This prevents the environment from being changed out from under +// you. Unlikely, but better safe than sorry. +func (p *APNSPusher) safePush(ctx context.Context, env string, notification *apns2.Notification) ( + *apns2.Response, error) { + + p.clientMu.Lock() + defer p.clientMu.Unlock() + if env == devicetokens.AppleEnvProduction { + p.client.Production() + } else { + p.client.Development() + } + return p.client.PushWithContext(ctx, notification) +} + +func (p *APNSPusher) buildAppleNotification(hexToken string, notification *Notification) *apns2.Notification { + payload := payload.NewPayload(). + Alert(notification.Message). + AlertBody(notification.Message) + return &apns2.Notification{ + DeviceToken: hexToken, + Payload: payload, + Topic: p.BundleID, + } +} + +// APNS2Client abstracts the apns2 library for easier testing. +type APNS2Client interface { + Development() APNS2Client + Production() APNS2Client + PushWithContext(apns2.Context, *apns2.Notification) (*apns2.Response, error) +} + +// apns2Client adapts the apns2.Client to APNS2Client so it can be replaced for testing. 
+type apns2Client struct { + *apns2.Client +} + +func (c apns2Client) Development() APNS2Client { + d := c.Client.Development() + return &apns2Client{Client: d} +} + +func (c apns2Client) Production() APNS2Client { + p := c.Client.Production() + return &apns2Client{Client: p} +} diff --git a/push/push_suite_test.go b/push/push_suite_test.go new file mode 100644 index 0000000000..a5b73e9d49 --- /dev/null +++ b/push/push_suite_test.go @@ -0,0 +1,11 @@ +package push + +import ( + "testing" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} diff --git a/push/push_test.go b/push/push_test.go new file mode 100644 index 0000000000..11496ffabc --- /dev/null +++ b/push/push_test.go @@ -0,0 +1,191 @@ +package push + +import ( + "context" + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/sideshow/apns2" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + testlog "github.com/tidepool-org/platform/log/test" +) + +const ( + testBundleID = "test-bundle-id" +) + +var ( + testDeviceToken []byte = []byte("dGVzdGluZyAxIDIgMw==") +) + +type pushTestDeps struct { + Client *mockAPNS2Client + Token *devicetokens.DeviceToken + Notification *Notification +} + +func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { + ctx := context.Background() + mockClient := &mockAPNS2Client{ + Response: &apns2.Response{ + StatusCode: http.StatusOK, + }, + } + pusher := NewAPNSPusher(mockClient, testBundleID) + deps := &pushTestDeps{ + Client: mockClient, + Token: &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: testDeviceToken, + }, + }, + Notification: &Notification{}, + } + return ctx, pusher, deps +} + +var _ = Describe("APNSPusher", func() { + Describe("NewAPNSPusherFromKeyData", func() { + It("errors if key data is empty or blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte(""), "key", "team", "bundle") + 
Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + + _, err = NewAPNSPusherFromKeyData(nil, "key", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + }) + + It("errors if key data is invalid", func() { + _, err := NewAPNSPusherFromKeyData([]byte("foo"), "key", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("AuthKey must be a valid .p8 PEM file"))) + }) + + It("errors if bundleID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "team", "") + Expect(err).To(MatchError(ContainSubstring("bundleID is blank"))) + }) + + It("errors if teamID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "", "bundle") + Expect(err).To(MatchError(ContainSubstring("teamID is blank"))) + }) + + It("errors if keyID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("keyID is blank"))) + }) + + It("succeeds", func() { + // random private key for testing + data := []byte(`-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDNrXT9ZRWPUAAg38Qi +Z553y7sGqOgMxUCG36eCIcRCy1QiTJBgGDxIhWvkE8Sx4N6hZANiAATrsRyRXLa0 +Tgczq8tmFomMP212HdkPF3gFEl/CkqGHUodR2EdZBW1zVcmuLjIN4zvqVVXMJm/U +eHZz9xAZ95y3irAfkMuOD/Bw88UYvhKnipOHBeS8BwqyfFQ+NRB6xYU= +-----END PRIVATE KEY----- +`) + pusher, err := NewAPNSPusherFromKeyData(data, "key", "team", "bundle") + Expect(err).To(Succeed()) + Expect(pusher).ToNot(Equal(nil)) + }) + }) + + Describe("Push", func() { + It("requires an Apple token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple = nil + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("can only use Apple device tokens"))) + }) + + Context("its environment", func() { + + for _, env := range []string{devicetokens.AppleEnvProduction, devicetokens.AppleEnvSandbox} { 
+ It("is set via its token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple.Environment = env + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + // This is reaching into the implementation of + // APNS2Client, but there's no other way to test this. + Expect(deps.Client.Env).To(Equal(env)) + }) + } + }) + + It("reports upstream errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Error = fmt.Errorf("test error") + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("test error"))) + }) + + Context("when a logger is available", func() { + It("logs", func() { + ctx, pusher, deps := testDeps() + testLogger := testlog.NewLogger() + ctx = log.NewContextWithLogger(ctx, testLogger) + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusOK, + ApnsID: "test-id", + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + testLogger.AssertInfo("notification pushed", log.Fields{ + "apnsID": "test-id", + }) + }) + }) + + It("reports non-200 responses as errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusBadRequest, + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("APNs returned non-200 status"))) + }) + }) +}) + +type mockAPNS2Client struct { + Response *apns2.Response + Error error + Env string +} + +func (c *mockAPNS2Client) Development() APNS2Client { + c.Env = devicetokens.AppleEnvSandbox + return c +} + +func (c *mockAPNS2Client) Production() APNS2Client { + c.Env = devicetokens.AppleEnvProduction + return c +} + +func (c *mockAPNS2Client) PushWithContext(_ apns2.Context, _ *apns2.Notification) (*apns2.Response, error) { + if c.Error != nil { + return nil, c.Error + } + if c.Response != nil { + return c.Response, nil 
+ } + return nil, nil +} diff --git a/task/queue/queue.go b/task/queue/queue.go index 7e3dfe0203..39f50524ad 100644 --- a/task/queue/queue.go +++ b/task/queue/queue.go @@ -374,7 +374,10 @@ func (q *queue) completeTask(ctx context.Context, tsk *task.Task) { func (q *queue) computeState(tsk *task.Task) { switch tsk.State { case task.TaskStatePending: - if tsk.AvailableTime == nil || time.Now().After(*tsk.AvailableTime) { + now := time.Now() + if tsk.AvailableTime == nil || tsk.AvailableTime.Before(now) { + tsk.AvailableTime = &now + } else if time.Now().After(*tsk.AvailableTime) { tsk.AppendError(errors.New("pending task requires future available time")) tsk.SetFailed() } diff --git a/task/service/service/service.go b/task/service/service/service.go index 40ed741686..dbd5da9695 100644 --- a/task/service/service/service.go +++ b/task/service/service/service.go @@ -3,21 +3,27 @@ package service import ( "context" - "github.com/tidepool-org/platform/clinics" - "github.com/tidepool-org/platform/ehr/reconcile" - "github.com/tidepool-org/platform/ehr/sync" + "github.com/kelseyhightower/envconfig" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/application" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/clinics" dataClient "github.com/tidepool-org/platform/data/client" + "github.com/tidepool-org/platform/data/events" dataSource "github.com/tidepool-org/platform/data/source" dataSourceClient "github.com/tidepool-org/platform/data/source/client" "github.com/tidepool-org/platform/dexcom" dexcomClient "github.com/tidepool-org/platform/dexcom/client" dexcomFetch "github.com/tidepool-org/platform/dexcom/fetch" dexcomProvider "github.com/tidepool-org/platform/dexcom/provider" + "github.com/tidepool-org/platform/ehr/reconcile" + "github.com/tidepool-org/platform/ehr/sync" "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/permission" + permissionClient 
"github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" serviceService "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" "github.com/tidepool-org/platform/task" @@ -39,6 +45,9 @@ type Service struct { dexcomClient dexcom.Client taskQueue queue.Queue clinicsClient clinics.Client + alertsClient *alerts.Client + pusher events.Pusher + permissionClient permission.Client } func New() *Service { @@ -70,6 +79,15 @@ func (s *Service) Initialize(provider application.Provider) error { if err := s.initializeClinicsClient(); err != nil { return err } + if err := s.initializeAlertsClient(); err != nil { + return err + } + if err := s.initializePusher(); err != nil { + return err + } + if err := s.initializePermissionClient(); err != nil { + return err + } if err := s.initializeTaskQueue(); err != nil { return err } @@ -346,6 +364,17 @@ func (s *Service) initializeTaskQueue() error { } runners = append(runners, ehrSyncRnnr) + if s.alertsClient == nil { + s.Logger().Info("alerts client is nil; care partner tasks will not run successfully") + } + + carePartnerRunner, err := alerts.NewCarePartnerRunner(s.Logger(), s.alertsClient, + s.AuthClient(), s.pusher, s.permissionClient, s.AuthClient()) + if err != nil { + return errors.Wrap(err, "unable to create care partner runner") + } + runners = append(runners, carePartnerRunner) + for _, r := range runners { r := r if err := taskQueue.RegisterRunner(r); err != nil { @@ -359,6 +388,75 @@ func (s *Service) initializeTaskQueue() error { return nil } +func (s *Service) initializeAlertsClient() error { + s.Logger().Debug("initializing alerts client") + + platformConfig := platform.NewConfig() + platformConfig.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("data", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := 
platformConfig.Load(loader); err != nil { + return errors.Wrap(err, "Unable to load alerts client config") + } + + s.Logger().Debug("Creating alerts client") + + platformClient, err := platform.NewClient(platformConfig, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "Unable to create platform client for use in alerts client") + } + s.alertsClient = alerts.NewClient(platformClient, s.Logger()) + + return nil +} + +func (s *Service) initializePusher() error { + var err error + + apns2Config := &struct { + SigningKey []byte `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_SIGNING_KEY"` + KeyID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_KEY_ID"` + BundleID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_BUNDLE_ID"` + TeamID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_TEAM_ID"` + }{} + if err := envconfig.Process("", apns2Config); err != nil { + return errors.Wrap(err, "Unable to process APNs pusher config") + } + + var pusher events.Pusher + pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, + apns2Config.TeamID, apns2Config.BundleID) + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} + +func (s *Service) initializePermissionClient() error { + s.Logger().Debug("Loading permission client config") + + cfg := platform.NewConfig() + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("permission", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to load permission client config") + } + + s.Logger().Debug("Creating permission client") + + clnt, err := permissionClient.New(cfg, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "unable to create permission client") + } + s.permissionClient = clnt + + return nil +} + func (s 
*Service) terminateTaskQueue() { if s.taskQueue != nil { s.Logger().Debug("Stopping task queue") diff --git a/task/service/service/service_test.go b/task/service/service/service_test.go index 8fe529cca5..71314e3c63 100644 --- a/task/service/service/service_test.go +++ b/task/service/service/service_test.go @@ -35,12 +35,12 @@ var _ = Describe("Service", func() { var dataClientConfig map[string]interface{} var dataSourceClientConfig map[string]interface{} var taskStoreConfig map[string]interface{} + var permissionClientConfig map[string]interface{} var taskServiceConfig map[string]interface{} var service *taskServiceService.Service BeforeEach(func() { provider = applicationTest.NewProviderWithDefaults() - serverSecret = authTest.NewServiceSecret() sessionToken = authTest.NewSessionToken() server = NewServer() @@ -69,6 +69,9 @@ var _ = Describe("Service", func() { "address": server.URL(), "server_token_secret": authTest.NewServiceSecret(), } + permissionClientConfig = map[string]interface{}{ + "address": server.URL(), + } taskStoreConfig = map[string]interface{}{ "addresses": os.Getenv("TIDEPOOL_STORE_ADDRESSES"), "database": test.RandomStringFromRangeAndCharset(4, 8, test.CharsetLowercase), @@ -88,6 +91,9 @@ var _ = Describe("Service", func() { "task": map[string]interface{}{ "store": taskStoreConfig, }, + "permission": map[string]interface{}{ + "client": permissionClientConfig, + }, "secret": authTest.NewServiceSecret(), "server": map[string]interface{}{ "address": testHttp.NewAddress(), diff --git a/task/store/mongo/mongo.go b/task/store/mongo/mongo.go index c36cbf1105..8232d55300 100644 --- a/task/store/mongo/mongo.go +++ b/task/store/mongo/mongo.go @@ -10,6 +10,7 @@ import ( "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/ehr/reconcile" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -81,6 +82,7 @@ func (s *Store) 
EnsureDefaultTasks() error { repository.EnsureSummaryBackfillTask, repository.EnsureSummaryMigrationTask, repository.EnsureEHRReconcileTask, + repository.EnsureCarePartnerTask, } for _, f := range fs { @@ -176,6 +178,11 @@ func (t *TaskRepository) EnsureEHRReconcileTask(ctx context.Context) error { return t.ensureTask(ctx, create) } +func (t *TaskRepository) EnsureCarePartnerTask(ctx context.Context) error { + create := alerts.NewCarePartnerTaskCreate() + return t.ensureTask(ctx, create) +} + func (t *TaskRepository) ensureTask(ctx context.Context, create *task.TaskCreate) error { tsk, err := task.NewTask(ctx, create) if err != nil { diff --git a/task/store/mongo/mongo_test.go b/task/store/mongo/mongo_test.go index 756cc33ed0..9c16d09aab 100644 --- a/task/store/mongo/mongo_test.go +++ b/task/store/mongo/mongo_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/prometheus/client_golang/prometheus/testutil" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" diff --git a/task/test/mock.go b/task/test/mock.go index 7c06ef90b3..c702095423 100644 --- a/task/test/mock.go +++ b/task/test/mock.go @@ -9,7 +9,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - page "github.com/tidepool-org/platform/page" task "github.com/tidepool-org/platform/task" ) diff --git a/vendor/github.com/IBM/sarama/mocks/README.md b/vendor/github.com/IBM/sarama/mocks/README.md new file mode 100644 index 0000000000..9f40ae2ff7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/README.md @@ -0,0 +1,13 @@ +# sarama/mocks + +The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. +You can use them to test your sarama applications using dependency injection. 
+ +The following mock objects are available: + +- [Consumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#Consumer), which will create [PartitionConsumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#PartitionConsumer) mocks. +- [AsyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#AsyncProducer) +- [SyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#SyncProducer) + +The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, +and the results will be reported to the `*testing.T` object you provided when creating the mock. diff --git a/vendor/github.com/IBM/sarama/mocks/async_producer.go b/vendor/github.com/IBM/sarama/mocks/async_producer.go new file mode 100644 index 0000000000..89e0e0db99 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/async_producer.go @@ -0,0 +1,272 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// AsyncProducer implements sarama's Producer interface for testing purposes. +// Before you can send messages to it's Input channel, you have to set expectations +// so it knows how to handle the input; it returns an error if the number of messages +// received is bigger then the number of expectations set. You can also set a +// function in each expectation so that the message is checked by this function and +// an error is returned if the match fails. +type AsyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + closed chan struct{} + input chan *sarama.ProducerMessage + successes chan *sarama.ProducerMessage + errors chan *sarama.ProducerError + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag + lastOffset int64 + *TopicConfig +} + +// NewAsyncProducer instantiates a new Producer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. 
The config argument is validated and used to determine +// whether it should ack successes on the Successes channel and handle partitioning. +func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + mp := &AsyncProducer{ + t: t, + closed: make(chan struct{}), + expectations: make([]*producerExpectation, 0), + input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + TopicConfig: NewTopicConfig(), + } + + go func() { + defer func() { + close(mp.successes) + close(mp.errors) + close(mp.closed) + }() + + partitioners := make(map[string]sarama.Partitioner, 1) + + for msg := range mp.input { + mp.txnLock.Lock() + if mp.IsTransactional() && mp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + mp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + mp.errors <- &sarama.ProducerError{Err: errors.New("attempt to send message when transaction is not started or is in ending state"), Msg: msg} + continue + } + mp.txnLock.Unlock() + partitioner := partitioners[msg.Topic] + if partitioner == nil { + partitioner = config.Producer.Partitioner(msg.Topic) + partitioners[msg.Topic] = partitioner + } + mp.l.Lock() + if mp.expectations == nil || len(mp.expectations) == 0 { + mp.expectations = nil + mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + } else { + expectation := mp.expectations[0] + mp.expectations = mp.expectations[1:] + + partition, err := partitioner.Partition(msg, mp.partitions(msg.Topic)) + if err != nil { + mp.t.Errorf("Partitioner returned 
an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } else { + msg.Partition = partition + if expectation.CheckFunction != nil { + err := expectation.CheckFunction(msg) + if err != nil { + mp.t.Errorf("Check function returned an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + mp.lastOffset++ + if config.Producer.Return.Successes { + msg.Offset = mp.lastOffset + mp.successes <- msg + } + } else if config.Producer.Return.Errors { + mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} + } + } + } + mp.l.Unlock() + } + + mp.l.Lock() + if len(mp.expectations) > 0 { + mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) + } + mp.l.Unlock() + }() + + return mp +} + +//////////////////////////////////////////////// +// Implement Producer interface +//////////////////////////////////////////////// + +// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) AsyncClose() { + close(mp.input) +} + +// Close corresponds with the Close method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) Close() error { + mp.AsyncClose() + <-mp.closed + return nil +} + +// Input corresponds with the Input method of sarama's Producer implementation. +// You have to set expectations on the mock producer before writing messages to the Input +// channel, so it knows how to handle them. 
If there is no more remaining expectations and +// a messages is written to the Input channel, the mock producer will write an error to the test +// state object. +func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { + return mp.input +} + +// Successes corresponds with the Successes method of sarama's Producer implementation. +func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { + return mp.successes +} + +// Errors corresponds with the Errors method of sarama's Producer implementation. +func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { + return mp.errors +} + +func (mp *AsyncProducer) IsTransactional() bool { + return mp.isTransactional +} + +func (mp *AsyncProducer) BeginTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (mp *AsyncProducer) CommitTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) AbortTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + return mp.txnStatus +} + +func (mp *AsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (mp *AsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectInputWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer that a +// message will be provided on the input channel. The mock producer will call the given function to +// check the message. 
If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. it will make it +// available on the Successes channel if the Producer.Return.Successes setting is set to true. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that a +// message will be provided on the input channel. The mock producer will first call the given +// function to check the message. If an error is returned it will be made available on the Errors +// channel otherwise the mock will handle the message as if it failed to produce successfully. This +// means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message +// will be provided on the input channel. The mock producer will call the given function to check +// the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. it will make +// it available on the Successes channel if the Producer.Return.Successes setting is set to true. 
+func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return mp +} + +// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message +// will be provided on the input channel. The mock producer will first call the given function to +// check the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it failed to produce successfully. This means +// it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return mp +} + +// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it is produced successfully, +// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting +// is set to true. +func (mp *AsyncProducer) ExpectInputAndSucceed() *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(nil) + + return mp +} + +// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it failed to produce +// successfully. This means it will make a ProducerError available on the Errors channel. 
+func (mp *AsyncProducer) ExpectInputAndFail(err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(nil, err) + + return mp +} diff --git a/vendor/github.com/IBM/sarama/mocks/consumer.go b/vendor/github.com/IBM/sarama/mocks/consumer.go new file mode 100644 index 0000000000..77bb9195cb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/consumer.go @@ -0,0 +1,441 @@ +package mocks + +import ( + "sync" + "sync/atomic" + + "github.com/IBM/sarama" +) + +// Consumer implements sarama's Consumer interface for testing purposes. +// Before you can start consuming from this consumer, you have to register +// topic/partitions using ExpectConsumePartition, and set expectations on them. +type Consumer struct { + l sync.Mutex + t ErrorReporter + config *sarama.Config + partitionConsumers map[string]map[int32]*PartitionConsumer + metadata map[string][]int32 +} + +// NewConsumer returns a new mock Consumer instance. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument can be set to nil; if it is +// non-nil it is validated. +func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + + c := &Consumer{ + t: t, + config: config, + partitionConsumers: make(map[string]map[int32]*PartitionConsumer), + } + return c +} + +/////////////////////////////////////////////////// +// Consumer interface implementation +/////////////////////////////////////////////////// + +// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. +// Before you can start consuming a partition, you have to set expectations on it using +// ExpectConsumePartition. You can only consume a partition once per consumer. 
+func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { + c.t.Errorf("No expectations set for %s/%d", topic, partition) + return nil, errOutOfExpectations + } + + pc := c.partitionConsumers[topic][partition] + if pc.consumed { + return nil, sarama.ConfigurationError("The topic/partition is already being consumed") + } + + if pc.offset != AnyOffset && pc.offset != offset { + c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset) + } + + pc.consumed = true + return pc, nil +} + +// Topics returns a list of topics, as registered with SetTopicMetadata +func (c *Consumer) Topics() ([]string, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + + var result []string + for topic := range c.metadata { + result = append(result, topic) + } + return result, nil +} + +// Partitions returns the list of parititons for the given topic, as registered with SetTopicMetadata +func (c *Consumer) Partitions(topic string) ([]int32, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Partitions. 
Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + if c.metadata[topic] == nil { + return nil, sarama.ErrUnknownTopicOrPartition + } + + return c.metadata[topic], nil +} + +func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { + c.l.Lock() + defer c.l.Unlock() + + hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) + for topic, partitionConsumers := range c.partitionConsumers { + hwm := make(map[int32]int64, len(partitionConsumers)) + for partition, pc := range partitionConsumers { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +// Close implements the Close method from the sarama.Consumer interface. It will close +// all registered PartitionConsumer instances. +func (c *Consumer) Close() error { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + _ = partitionConsumer.Close() + } + } + + return nil +} + +// Pause implements Consumer. +func (c *Consumer) Pause(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Pause() + } + } + } + } +} + +// Resume implements Consumer. +func (c *Consumer) Resume(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Resume() + } + } + } + } +} + +// PauseAll implements Consumer. 
+func (c *Consumer) PauseAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Pause() + } + } +} + +// ResumeAll implements Consumer. +func (c *Consumer) ResumeAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Resume() + } + } +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// SetTopicMetadata sets the clusters topic/partition metadata, +// which will be returned by Topics() and Partitions(). +func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + c.metadata = metadata +} + +// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. +// The registered PartitionConsumer will be returned, so you can set expectations +// on it using method chaining. Once a topic/partition is registered, you are +// expected to start consuming it using ConsumePartition. If that doesn't happen, +// an error will be written to the error reporter once the mock consumer is closed. It also expects +// that the message and error channels be written with YieldMessage and YieldError accordingly, +// and be fully consumed once the mock consumer is closed if ExpectMessagesDrainedOnClose or +// ExpectErrorsDrainedOnClose have been called. 
+func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil { + c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) + } + + if c.partitionConsumers[topic][partition] == nil { + highWatermarkOffset := offset + if offset == sarama.OffsetOldest { + highWatermarkOffset = 0 + } + + c.partitionConsumers[topic][partition] = &PartitionConsumer{ + highWaterMarkOffset: highWatermarkOffset, + t: c.t, + topic: topic, + partition: partition, + offset: offset, + messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + suppressedMessages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), + } + } + + return c.partitionConsumers[topic][partition] +} + +/////////////////////////////////////////////////// +// PartitionConsumer mock type +/////////////////////////////////////////////////// + +// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. +// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is +// registered first using the Consumer's ExpectConsumePartition method. Before consuming the +// Errors and Messages channel, you should specify what values will be provided on these +// channels using YieldMessage and YieldError. 
+type PartitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + suppressedHighWaterMarkOffset int64 + l sync.Mutex + t ErrorReporter + topic string + partition int32 + offset int64 + messages chan *sarama.ConsumerMessage + suppressedMessages chan *sarama.ConsumerMessage + errors chan *sarama.ConsumerError + singleClose sync.Once + consumed bool + errorsShouldBeDrained bool + messagesShouldBeDrained bool + paused bool +} + +/////////////////////////////////////////////////// +// PartitionConsumer interface implementation +/////////////////////////////////////////////////// + +// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) AsyncClose() { + pc.singleClose.Do(func() { + close(pc.suppressedMessages) + close(pc.messages) + close(pc.errors) + }) +} + +// Close implements the Close method from the sarama.PartitionConsumer interface. It will +// verify whether the partition consumer was actually started. 
+func (pc *PartitionConsumer) Close() error { + if !pc.consumed { + pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition) + return errPartitionConsumerNotStarted + } + + if pc.errorsShouldBeDrained && len(pc.errors) > 0 { + pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors)) + } + + if pc.messagesShouldBeDrained && len(pc.messages) > 0 { + pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages)) + } + + pc.AsyncClose() + + var ( + closeErr error + wg sync.WaitGroup + ) + + wg.Add(1) + go func() { + defer wg.Done() + + errs := make(sarama.ConsumerErrors, 0) + for err := range pc.errors { + errs = append(errs, err) + } + + if len(errs) > 0 { + closeErr = errs + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for range pc.messages { + // drain + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for range pc.suppressedMessages { + // drain + } + }() + + wg.Wait() + return closeErr +} + +// Errors implements the Errors method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError { + return pc.errors +} + +// Messages implements the Messages method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage { + return pc.messages +} + +func (pc *PartitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&pc.highWaterMarkOffset) +} + +// Pause implements the Pause method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Pause() { + pc.l.Lock() + defer pc.l.Unlock() + + pc.suppressedHighWaterMarkOffset = atomic.LoadInt64(&pc.highWaterMarkOffset) + + pc.paused = true +} + +// Resume implements the Resume method from the sarama.PartitionConsumer interface. 
+func (pc *PartitionConsumer) Resume() { + pc.l.Lock() + defer pc.l.Unlock() + + pc.highWaterMarkOffset = atomic.LoadInt64(&pc.suppressedHighWaterMarkOffset) + for len(pc.suppressedMessages) > 0 { + msg := <-pc.suppressedMessages + pc.messages <- msg + } + + pc.paused = false +} + +// IsPaused implements the IsPaused method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) IsPaused() bool { + pc.l.Lock() + defer pc.l.Unlock() + + return pc.paused +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// YieldMessage will yield a messages Messages channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this +// message was consumed from the Messages channel, because there are legitimate +// reasons forthis not to happen. ou can call ExpectMessagesDrainedOnClose so it will +// verify that the channel is empty on close. +func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) *PartitionConsumer { + pc.l.Lock() + defer pc.l.Unlock() + + msg.Topic = pc.topic + msg.Partition = pc.partition + + if pc.paused { + msg.Offset = atomic.AddInt64(&pc.suppressedHighWaterMarkOffset, 1) - 1 + pc.suppressedMessages <- msg + } else { + msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1) - 1 + pc.messages <- msg + } + + return pc +} + +// YieldError will yield an error on the Errors channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this error was +// consumed from the Errors channel, because there are legitimate reasons for this +// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that +// the channel is empty on close. 
+func (pc *PartitionConsumer) YieldError(err error) *PartitionConsumer { + pc.errors <- &sarama.ConsumerError{ + Topic: pc.topic, + Partition: pc.partition, + Err: err, + } + + return pc +} + +// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer +// that the messages channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() *PartitionConsumer { + pc.messagesShouldBeDrained = true + + return pc +} + +// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer +// that the errors channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() *PartitionConsumer { + pc.errorsShouldBeDrained = true + + return pc +} diff --git a/vendor/github.com/IBM/sarama/mocks/mocks.go b/vendor/github.com/IBM/sarama/mocks/mocks.go new file mode 100644 index 0000000000..bd9d630ddb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/mocks.go @@ -0,0 +1,110 @@ +/* +Package mocks provides mocks that can be used for testing applications +that use Sarama. The mock types provided by this package implement the +interfaces Sarama exports, so you can use them for dependency injection +in your tests. + +All mock instances require you to set expectations on them before you +can use them. It will determine how the mock will behave. If an +expectation is not met, it will make your test fail. + +NOTE: this package currently does not fall under the API stability +guarantee of Sarama as it is still considered experimental. +*/ +package mocks + +import ( + "errors" + "fmt" + + "github.com/IBM/sarama" +) + +// ErrorReporter is a simple interface that includes the testing.T methods we use to report +// expectation violations when using the mock objects. 
+type ErrorReporter interface { + Errorf(string, ...interface{}) +} + +// ValueChecker is a function type to be set in each expectation of the producer mocks +// to check the value passed. +type ValueChecker func(val []byte) error + +// MessageChecker is a function type to be set in each expectation of the producer mocks +// to check the message passed. +type MessageChecker func(*sarama.ProducerMessage) error + +// messageValueChecker wraps a ValueChecker into a MessageChecker. +// Failure to encode the message value will return an error and not call +// the wrapped ValueChecker. +func messageValueChecker(f ValueChecker) MessageChecker { + if f == nil { + return nil + } + return func(msg *sarama.ProducerMessage) error { + val, err := msg.Value.Encode() + if err != nil { + return fmt.Errorf("Input message encoding failed: %w", err) + } + return f(val) + } +} + +var ( + errProduceSuccess error = nil + errOutOfExpectations = errors.New("No more expectations set on mock") + errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") +) + +const AnyOffset int64 = -1000 + +type producerExpectation struct { + Result error + CheckFunction MessageChecker +} + +// TopicConfig describes a mock topic structure for the mock producers’ partitioning needs. +type TopicConfig struct { + overridePartitions map[string]int32 + defaultPartitions int32 +} + +// NewTopicConfig makes a configuration which defaults to 32 partitions for every topic. +func NewTopicConfig() *TopicConfig { + return &TopicConfig{ + overridePartitions: make(map[string]int32, 0), + defaultPartitions: 32, + } +} + +// SetDefaultPartitions sets the number of partitions any topic not explicitly configured otherwise +// (by SetPartitions) will have from the perspective of created partitioners. +func (pc *TopicConfig) SetDefaultPartitions(n int32) { + pc.defaultPartitions = n +} + +// SetPartitions sets the number of partitions the partitioners will see for specific topics. 
This +// only applies to messages produced after setting them. +func (pc *TopicConfig) SetPartitions(partitions map[string]int32) { + for p, n := range partitions { + pc.overridePartitions[p] = n + } +} + +func (pc *TopicConfig) partitions(topic string) int32 { + if n, found := pc.overridePartitions[topic]; found { + return n + } + return pc.defaultPartitions +} + +// NewTestConfig returns a config meant to be used by tests. +// Due to inconsistencies with the request versions the clients send using the default Kafka version +// and the response versions our mocks use, we default to the minimum Kafka version in most tests +func NewTestConfig() *sarama.Config { + config := sarama.NewConfig() + config.Consumer.Retry.Backoff = 0 + config.Producer.Retry.Backoff = 0 + config.Version = sarama.MinVersion + return config +} diff --git a/vendor/github.com/IBM/sarama/mocks/sync_producer.go b/vendor/github.com/IBM/sarama/mocks/sync_producer.go new file mode 100644 index 0000000000..9d103ed0d7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/sync_producer.go @@ -0,0 +1,264 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// SyncProducer implements sarama's SyncProducer interface for testing purposes. +// Before you can use it, you have to set expectations on the mock SyncProducer +// to tell it how to handle calls to SendMessage, so you can easily test success +// and failure scenarios. +type SyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + lastOffset int64 + + *TopicConfig + newPartitioner sarama.PartitionerConstructor + partitioners map[string]sarama.Partitioner + + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag +} + +// NewSyncProducer instantiates a new SyncProducer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. 
The config argument is validated and used to handle +// partitioning. +func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + return &SyncProducer{ + t: t, + expectations: make([]*producerExpectation, 0), + TopicConfig: NewTopicConfig(), + newPartitioner: config.Producer.Partitioner, + partitioners: make(map[string]sarama.Partitioner, 1), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + } +} + +//////////////////////////////////////////////// +// Implement SyncProducer interface +//////////////////////////////////////////////// + +// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessage, so it knows +// how to handle them. You can set a function in each expectation so that the message value +// checked by this function and an error is returned if the match fails. +// If there is no more remaining expectation when SendMessage is called, +// the mock producer will write an error to the test state object. 
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { + sp.l.Lock() + defer sp.l.Unlock() + + if sp.IsTransactional() && sp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + sp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + return -1, -1, errors.New("attempt to send message when transaction is not started or is in ending state") + } + + if len(sp.expectations) > 0 { + expectation := sp.expectations[0] + sp.expectations = sp.expectations[1:] + topic := msg.Topic + partition, err := sp.partitioner(topic).Partition(msg, sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return -1, -1, err + } + msg.Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msg) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return -1, -1, errCheck + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + sp.lastOffset++ + msg.Offset = sp.lastOffset + return 0, msg.Offset, nil + } + return -1, -1, expectation.Result + } + sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + return -1, -1, errOutOfExpectations +} + +// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessages, so it knows +// how to handle them. If there is no more remaining expectations when SendMessages is called, +// the mock producer will write an error to the test state object. 
+func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) >= len(msgs) { + expectations := sp.expectations[0:len(msgs)] + sp.expectations = sp.expectations[len(msgs):] + + for i, expectation := range expectations { + topic := msgs[i].Topic + partition, err := sp.partitioner(topic).Partition(msgs[i], sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return err + } + msgs[i].Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msgs[i]) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return errCheck + } + } + if !errors.Is(expectation.Result, errProduceSuccess) { + return expectation.Result + } + sp.lastOffset++ + msgs[i].Offset = sp.lastOffset + } + return nil + } + sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") + return errOutOfExpectations +} + +func (sp *SyncProducer) partitioner(topic string) sarama.Partitioner { + partitioner := sp.partitioners[topic] + if partitioner == nil { + partitioner = sp.newPartitioner(topic) + sp.partitioners[topic] = partitioner + } + return partitioner +} + +// Close corresponds with the Close method of sarama's SyncProducer implementation. +// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, +// so it will write an error to the test state if there's any remaining expectations. 
+func (sp *SyncProducer) Close() error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) > 0 { + sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) + } + + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectSendMessageWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer +// that SendMessage will be called. The mock producer will first call the given function to check +// the message. It will cascade the error of the function, if any, or handle the message as if it +// produced successfully, i.e. by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that +// SendMessage will be called. The mock producer will first call the given function to check the +// message. It will cascade the error of the function, if any, or handle the message as if it +// failed to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage +// will be called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it produced +// successfully, i.e. 
by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it failed +// to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return sp +} + +// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it produced successfully, i.e. by +// returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageAndSucceed() *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(nil) + + return sp +} + +// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it failed to produce +// successfully, i.e. by returning the provided error. 
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(nil, err) + + return sp +} + +func (sp *SyncProducer) IsTransactional() bool { + return sp.isTransactional +} + +func (sp *SyncProducer) BeginTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (sp *SyncProducer) CommitTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) AbortTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + return sp.txnStatus +} + +func (sp *SyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (sp *SyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} diff --git a/vendor/github.com/sideshow/apns2/.gitignore b/vendor/github.com/sideshow/apns2/.gitignore new file mode 100644 index 0000000000..5b77d5d22e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/.gitignore @@ -0,0 +1,31 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +/*.p12 +/*.pem +/*.cer +/*.p8 + +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/sideshow/apns2/LICENSE b/vendor/github.com/sideshow/apns2/LICENSE new file mode 100644 index 0000000000..59abbcf40e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, 
free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/sideshow/apns2/README.md b/vendor/github.com/sideshow/apns2/README.md new file mode 100644 index 0000000000..32e04190ce --- /dev/null +++ b/vendor/github.com/sideshow/apns2/README.md @@ -0,0 +1,216 @@ +# APNS/2 + +APNS/2 is a go package designed for simple, flexible and fast Apple Push Notifications on iOS, OSX and Safari using the new HTTP/2 Push provider API. 
+ +[![Build Status](https://github.com/sideshow/apns2/actions/workflows/tests.yml/badge.svg)](https://github.com/sideshow/apns2/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/sideshow/apns2/badge.svg?branch=master&service=github)](https://coveralls.io/github/sideshow/apns2?branch=master) [![GoDoc](https://godoc.org/github.com/sideshow/apns2?status.svg)](https://godoc.org/github.com/sideshow/apns2) + +## Features + +- Uses new Apple APNs HTTP/2 connection +- Fast - See [notes on speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed) +- Works with go 1.7 and later +- Supports new Apple Token Based Authentication (JWT) +- Supports new iOS 10 features such as Collapse IDs, Subtitles and Mutable Notifications +- Supports new iOS 15 features interruptionLevel and relevanceScore +- Supports persistent connections to APNs +- Supports VoIP/PushKit notifications (iOS 8 and later) +- Modular & easy to use +- Tested and working in APNs production environment + +## Install + +- Make sure you have [Go](https://golang.org/doc/install) installed and have set your [GOPATH](https://golang.org/doc/code.html#GOPATH). 
+- Install apns2: + +```sh +go get -u github.com/sideshow/apns2 +``` + +If you are running the test suite you will also need to install testify: + +```sh +go get -u github.com/stretchr/testify +``` + +## Example + +```go +package main + +import ( + "log" + "fmt" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/certificate" +) + +func main() { + + cert, err := certificate.FromP12File("../cert.p12", "") + if err != nil { + log.Fatal("Cert Error:", err) + } + + notification := &apns2.Notification{} + notification.DeviceToken = "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7" + notification.Topic = "com.sideshow.Apns2" + notification.Payload = []byte(`{"aps":{"alert":"Hello!"}}`) // See Payload section below + + // If you want to test push notifications for builds running directly from XCode (Development), use + // client := apns2.NewClient(cert).Development() + // For apps published to the app store or installed as an ad-hoc distribution use Production() + + client := apns2.NewClient(cert).Production() + res, err := client.Push(notification) + + if err != nil { + log.Fatal("Error:", err) + } + + fmt.Printf("%v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## JWT Token Example + +Instead of using a `.p12` or `.pem` certificate as above, you can optionally use +APNs JWT _Provider Authentication Tokens_. First you will need a signing key (`.p8` file), Key ID and Team ID [from Apple](http://help.apple.com/xcode/mac/current/#/dev54d690a66). Once you have these details, you can create a new client: + +```go +authKey, err := token.AuthKeyFromFile("../AuthKey_XXX.p8") +if err != nil { + log.Fatal("token error:", err) +} + +token := &token.Token{ + AuthKey: authKey, + // KeyID from developer account (Certificates, Identifiers & Profiles -> Keys) + KeyID: "ABC123DEFG", + // TeamID from developer account (View Account -> Membership) + TeamID: "DEF123GHIJ", +} +... 
+ +client := apns2.NewTokenClient(token) +res, err := client.Push(notification) +``` + +- You can use one APNs signing key to authenticate tokens for multiple apps. +- A signing key works for both the development and production environments. +- A signing key doesn’t expire but can be revoked. + +## Notification + +At a minimum, a _Notification_ needs a _DeviceToken_, a _Topic_ and a _Payload_. + +```go +notification := &apns2.Notification{ + DeviceToken: "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7", + Topic: "com.sideshow.Apns2", + Payload: []byte(`{"aps":{"alert":"Hello!"}}`), +} +``` + +You can also set an optional _ApnsID_, _Expiration_ or _Priority_. + +```go +notification.ApnsID = "40636A2C-C093-493E-936A-2A4333C06DEA" +notification.Expiration = time.Now() +notification.Priority = apns2.PriorityLow +``` + +## Payload + +You can use raw bytes for the `notification.Payload` as above, or you can use the payload builder package which makes it easy to construct APNs payloads. + +```go +// {"aps":{"alert":"hello","badge":1},"key":"val"} + +payload := payload.NewPayload().Alert("hello").Badge(1).Custom("key", "val") + +notification.Payload = payload +client.Push(notification) +``` + +Refer to the [payload](https://godoc.org/github.com/sideshow/apns2/payload) docs for more info. + +## Response, Error handling + +APNS/2 draws the distinction between a valid response from Apple indicating whether or not the _Notification_ was sent or not, and an unrecoverable or unexpected _Error_; + +- An `Error` is returned if a non-recoverable error occurs, i.e. if there is a problem with the underlying _http.Client_ connection or _Certificate_, the payload was not sent, or a valid _Response_ was not received. +- A `Response` is returned if the payload was successfully sent to Apple and a documented response was received. 
This struct will contain more information about whether or not the push notification succeeded, its _apns-id_ and if applicable, more information around why it did not succeed. + +To check if a `Notification` was successfully sent; + +```go +res, err := client.Push(notification) +if err != nil { + log.Println("There was an error", err) + return +} + +if res.Sent() { + log.Println("Sent:", res.ApnsID) +} else { + fmt.Printf("Not Sent: %v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## Context & Timeouts + +For better control over request cancellations and timeouts APNS/2 supports +contexts. Using a context can be helpful if you want to cancel all pushes when +the parent process is cancelled, or need finer grained control over individual +push timeouts. See the [Google post](https://blog.golang.org/context) for more +information on contexts. + +```go +ctx, cancel = context.WithTimeout(context.Background(), 10 * time.Second) +res, err := client.PushWithContext(ctx, notification) +defer cancel() +``` + +## Speed & Performance + +Also see the wiki page on [APNS HTTP 2 Push Speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +For best performance, you should hold on to an `apns2.Client` instance and not re-create it every push. The underlying TLS connection itself can take a few seconds to connect and negotiate, so if you are setting up an `apns2.Client` and tearing it down every push, then this will greatly affect performance. (Apple suggest keeping the connection open all the time). + +You should also limit the amount of `apns2.Client` instances. The underlying transport has a http connection pool itself, so a single client instance will be enough for most users (One instance can potentially do 4,000+ pushes per second). If you need more than this then one instance per CPU core is a good starting point. + +Speed is greatly affected by the location of your server and the quality of your network connection. 
If you're just testing locally, behind a proxy or if your server is outside USA then you're not going to get great performance. With a good server located in AWS, you should be able to get [decent throughput](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +## Command line tool + +APNS/2 has a command line tool that can be installed with `go get github.com/sideshow/apns2/apns2`. Usage: + +``` +apns2 --help +usage: apns2 --certificate-path=CERTIFICATE-PATH --topic=TOPIC [] + +Listens to STDIN to send notifications and writes APNS response code and reason to STDOUT. + +The expected format is: +Example: aff0c63d9eaa63ad161bafee732d5bc2c31f66d552054718ff19ce314371e5d0 {"aps": {"alert": "hi"}} +Flags: + --help Show context-sensitive help (also try --help-long and --help-man). + -c, --certificate-path=CERTIFICATE-PATH + Path to certificate file. + -t, --topic=TOPIC The topic of the remote notification, which is typically the bundle ID for your app + -m, --mode="production" APNS server to send notifications to. `production` or `development`. Defaults to `production` + --version Show application version. +``` + +## License + +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sideshow/apns2/client.go b/vendor/github.com/sideshow/apns2/client.go new file mode 100644 index 0000000000..cd98dd4228 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client.go @@ -0,0 +1,238 @@ +// Package apns2 is a go Apple Push Notification Service (APNs) provider that +// allows you to send remote notifications to your iOS, tvOS, and OS X +// apps, using the new APNs HTTP/2 network protocol. +package apns2 + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "io" + "net" + "net/http" + "strconv" + "time" + + "github.com/sideshow/apns2/token" + "golang.org/x/net/http2" +) + +// Apple HTTP/2 Development & Production urls +const ( + HostDevelopment = "https://api.sandbox.push.apple.com" + HostProduction = "https://api.push.apple.com" +) + +// DefaultHost is a mutable var for testing purposes +var DefaultHost = HostDevelopment + +var ( + // HTTPClientTimeout specifies a time limit for requests made by the + // HTTPClient. The timeout includes connection time, any redirects, + // and reading the response body. + HTTPClientTimeout = 60 * time.Second + + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. If + // zero, no health check is performed. + ReadIdleTimeout = 15 * time.Second + + // TCPKeepAlive specifies the keep-alive period for an active network + // connection. If zero, keep-alive probes are sent with a default value + // (currently 15 seconds) + TCPKeepAlive = 15 * time.Second + + // TLSDialTimeout is the maximum amount of time a dial will wait for a connect + // to complete. 
+ TLSDialTimeout = 20 * time.Second +) + +// DialTLS is the default dial function for creating TLS connections for +// non-proxied HTTPS requests. +var DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer := &net.Dialer{ + Timeout: TLSDialTimeout, + KeepAlive: TCPKeepAlive, + } + return tls.DialWithDialer(dialer, network, addr, cfg) +} + +// Client represents a connection with the APNs +type Client struct { + Host string + Certificate tls.Certificate + Token *token.Token + HTTPClient *http.Client +} + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. Context's methods may be called by multiple goroutines +// simultaneously. +type Context interface { + context.Context +} + +type connectionCloser interface { + CloseIdleConnections() +} + +// NewClient returns a new Client with an underlying http.Client configured with +// the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +// +// If your use case involves multiple long-lived connections, consider using +// the ClientManager, which manages clients for you. 
+func NewClient(certificate tls.Certificate) *Client { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{certificate}, + } + if len(certificate.Certificate) > 0 { + tlsConfig.BuildNameToCertificate() + } + transport := &http2.Transport{ + TLSClientConfig: tlsConfig, + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Certificate: certificate, + Host: DefaultHost, + } +} + +// NewTokenClient returns a new Client with an underlying http.Client configured +// with the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +func NewTokenClient(token *token.Token) *Client { + transport := &http2.Transport{ + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + Token: token, + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Host: DefaultHost, + } +} + +// Development sets the Client to use the APNs development push endpoint. +func (c *Client) Development() *Client { + c.Host = HostDevelopment + return c +} + +// Production sets the Client to use the APNs production push endpoint. +func (c *Client) Production() *Client { + c.Host = HostProduction + return c +} + +// Push sends a Notification to the APNs gateway. If the underlying http.Client +// is not currently connected, this method will attempt to reconnect +// transparently before sending the notification. It will return a Response +// indicating whether the notification was accepted or rejected by the APNs +// gateway, or an error if something goes wrong. 
+// +// Use PushWithContext if you need better cancellation and timeout control. +func (c *Client) Push(n *Notification) (*Response, error) { + return c.PushWithContext(context.Background(), n) +} + +// PushWithContext sends a Notification to the APNs gateway. Context carries a +// deadline and a cancellation signal and allows you to close long running +// requests when the context timeout is exceeded. Context can be nil, for +// backwards compatibility. +// +// If the underlying http.Client is not currently connected, this method will +// attempt to reconnect transparently before sending the notification. It will +// return a Response indicating whether the notification was accepted or +// rejected by the APNs gateway, or an error if something goes wrong. +func (c *Client) PushWithContext(ctx Context, n *Notification) (*Response, error) { + payload, err := json.Marshal(n) + if err != nil { + return nil, err + } + + url := c.Host + "/3/device/" + n.DeviceToken + request, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + if c.Token != nil { + c.setTokenHeader(request) + } + + setHeaders(request, n) + + response, err := c.HTTPClient.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + + r := &Response{} + r.StatusCode = response.StatusCode + r.ApnsID = response.Header.Get("apns-id") + + decoder := json.NewDecoder(response.Body) + if err := decoder.Decode(r); err != nil && err != io.EOF { + return &Response{}, err + } + return r, nil +} + +// CloseIdleConnections closes any underlying connections which were previously +// connected from previous requests but are now sitting idle. It will not +// interrupt any connections currently in use. 
+func (c *Client) CloseIdleConnections() { + c.HTTPClient.Transport.(connectionCloser).CloseIdleConnections() +} + +func (c *Client) setTokenHeader(r *http.Request) { + bearer := c.Token.GenerateIfExpired() + r.Header.Set("authorization", "bearer "+bearer) +} + +func setHeaders(r *http.Request, n *Notification) { + r.Header.Set("Content-Type", "application/json; charset=utf-8") + if n.Topic != "" { + r.Header.Set("apns-topic", n.Topic) + } + if n.ApnsID != "" { + r.Header.Set("apns-id", n.ApnsID) + } + if n.CollapseID != "" { + r.Header.Set("apns-collapse-id", n.CollapseID) + } + if n.Priority > 0 { + r.Header.Set("apns-priority", strconv.Itoa(n.Priority)) + } + if !n.Expiration.IsZero() { + r.Header.Set("apns-expiration", strconv.FormatInt(n.Expiration.Unix(), 10)) + } + if n.PushType != "" { + r.Header.Set("apns-push-type", string(n.PushType)) + } else { + r.Header.Set("apns-push-type", string(PushTypeAlert)) + } + +} diff --git a/vendor/github.com/sideshow/apns2/client_manager.go b/vendor/github.com/sideshow/apns2/client_manager.go new file mode 100644 index 0000000000..bb4bdf900d --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client_manager.go @@ -0,0 +1,162 @@ +package apns2 + +import ( + "container/list" + "crypto/sha1" + "crypto/tls" + "sync" + "time" +) + +type managerItem struct { + key [sha1.Size]byte + client *Client + lastUsed time.Time +} + +// ClientManager is a way to manage multiple connections to the APNs. +type ClientManager struct { + // MaxSize is the maximum number of clients allowed in the manager. When + // this limit is reached, the least recently used client is evicted. Set + // zero for no limit. + MaxSize int + + // MaxAge is the maximum age of clients in the manager. Upon retrieval, if + // a client has remained unused in the manager for this duration or longer, + // it is evicted and nil is returned. Set zero to disable this + // functionality. 
+ MaxAge time.Duration + + // Factory is the function which constructs clients if not found in the + // manager. + Factory func(certificate tls.Certificate) *Client + + cache map[[sha1.Size]byte]*list.Element + ll *list.List + mu sync.Mutex + once sync.Once +} + +// NewClientManager returns a new ClientManager for prolonged, concurrent usage +// of multiple APNs clients. ClientManager is flexible enough to work best for +// your use case. When a client is not found in the manager, Get will return +// the result of calling Factory, which can be a Client or nil. +// +// Having multiple clients per certificate in the manager is not allowed. +// +// By default, MaxSize is 64, MaxAge is 10 minutes, and Factory always returns +// a Client with default options. +func NewClientManager() *ClientManager { + manager := &ClientManager{ + MaxSize: 64, + MaxAge: 10 * time.Minute, + Factory: NewClient, + } + + manager.initInternals() + + return manager +} + +// Add adds a Client to the manager. You can use this to individually configure +// Clients in the manager. +func (m *ClientManager) Add(client *Client) { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(client.Certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + item.client = client + item.lastUsed = now + m.ll.MoveToFront(ele) + return + } + ele := m.ll.PushFront(&managerItem{key, client, now}) + m.cache[key] = ele + if m.MaxSize != 0 && m.ll.Len() > m.MaxSize { + m.mu.Unlock() + m.removeOldest() + m.mu.Lock() + } +} + +// Get gets a Client from the manager. If a Client is not found in the manager +// or if a Client has remained in the manager longer than MaxAge, Get will call +// the ClientManager's Factory function, store the result in the manager if +// non-nil, and return it. 
+func (m *ClientManager) Get(certificate tls.Certificate) *Client { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + if m.MaxAge != 0 && item.lastUsed.Before(now.Add(-m.MaxAge)) { + c := m.Factory(certificate) + if c == nil { + return nil + } + item.client = c + } + item.lastUsed = now + m.ll.MoveToFront(ele) + return item.client + } + + c := m.Factory(certificate) + if c == nil { + return nil + } + m.mu.Unlock() + m.Add(c) + m.mu.Lock() + return c +} + +// Len returns the current size of the ClientManager. +func (m *ClientManager) Len() int { + if m.cache == nil { + return 0 + } + m.mu.Lock() + defer m.mu.Unlock() + return m.ll.Len() +} + +func (m *ClientManager) initInternals() { + m.once.Do(func() { + m.cache = map[[sha1.Size]byte]*list.Element{} + m.ll = list.New() + }) +} + +func (m *ClientManager) removeOldest() { + m.mu.Lock() + ele := m.ll.Back() + m.mu.Unlock() + if ele != nil { + m.removeElement(ele) + } +} + +func (m *ClientManager) removeElement(e *list.Element) { + m.mu.Lock() + defer m.mu.Unlock() + m.ll.Remove(e) + delete(m.cache, e.Value.(*managerItem).key) +} + +func cacheKey(certificate tls.Certificate) [sha1.Size]byte { + var data []byte + + for _, cert := range certificate.Certificate { + data = append(data, cert...) + } + + return sha1.Sum(data) +} diff --git a/vendor/github.com/sideshow/apns2/notification.go b/vendor/github.com/sideshow/apns2/notification.go new file mode 100644 index 0000000000..69bf312de5 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/notification.go @@ -0,0 +1,148 @@ +package apns2 + +import ( + "encoding/json" + "time" +) + +// EPushType defines the value for the apns-push-type header +type EPushType string + +const ( + // PushTypeAlert is used for notifications that trigger a user interaction — + // for example, an alert, badge, or sound. 
If you set this push type, the + // topic field must use your app’s bundle ID as the topic. If the + // notification requires immediate action from the user, set notification + // priority to 10; otherwise use 5. The alert push type is required on + // watchOS 6 and later. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeAlert EPushType = "alert" + + // PushTypeBackground is used for notifications that deliver content in the + // background, and don’t trigger any user interactions. If you set this push + // type, the topic field must use your app’s bundle ID as the topic. Always + // use priority 5. Using priority 10 is an error. The background push type + // is required on watchOS 6 and later. It is recommended on macOS, iOS, + // tvOS, and iPadOS. + PushTypeBackground EPushType = "background" + + // PushTypeLocation is used for notifications that request a user’s + // location. If you set this push type, the topic field must use your app’s + // bundle ID with .location-query appended to the end. The location push + // type is recommended for iOS and iPadOS. It isn’t available on macOS, + // tvOS, and watchOS. If the location query requires an immediate response + // from the Location Push Service Extension, set notification apns-priority + // to 10; otherwise, use 5. The location push type supports only token-based + // authentication. + PushTypeLocation EPushType = "location" + + // PushTypeVOIP is used for notifications that provide information about an + // incoming Voice-over-IP (VoIP) call. If you set this push type, the topic + // field must use your app’s bundle ID with .voip appended to the end. If + // you’re using certificate-based authentication, you must also register the + // certificate for VoIP services. The voip push type is not available on + // watchOS. It is recommended on macOS, iOS, tvOS, and iPadOS. 
+ PushTypeVOIP EPushType = "voip" + + // PushTypeComplication is used for notifications that contain update + // information for a watchOS app’s complications. If you set this push type, + // the topic field must use your app’s bundle ID with .complication appended + // to the end. If you’re using certificate-based authentication, you must + // also register the certificate for WatchKit services. The complication + // push type is recommended for watchOS and iOS. It is not available on + // macOS, tvOS, and iPadOS. + PushTypeComplication EPushType = "complication" + + // PushTypeFileProvider is used to signal changes to a File Provider + // extension. If you set this push type, the topic field must use your app’s + // bundle ID with .pushkit.fileprovider appended to the end. The + // fileprovider push type is not available on watchOS. It is recommended on + // macOS, iOS, tvOS, and iPadOS. + PushTypeFileProvider EPushType = "fileprovider" + + // PushTypeMDM is used for notifications that tell managed devices to + // contact the MDM server. If you set this push type, you must use the topic + // from the UID attribute in the subject of your MDM push certificate. + PushTypeMDM EPushType = "mdm" +) + +const ( + // PriorityLow will tell APNs to send the push message at a time that takes + // into account power considerations for the device. Notifications with this + // priority might be grouped and delivered in bursts. They are throttled, + // and in some cases are not delivered. + PriorityLow = 5 + + // PriorityHigh will tell APNs to send the push message immediately. + // Notifications with this priority must trigger an alert, sound, or badge + // on the target device. It is an error to use this priority for a push + // notification that contains only the content-available key. + PriorityHigh = 10 +) + +// Notification represents the the data and metadata for a APNs Remote Notification. 
+type Notification struct { + + // An optional canonical UUID that identifies the notification. The + // canonical form is 32 lowercase hexadecimal digits, displayed in five + // groups separated by hyphens in the form 8-4-4-4-12. An example UUID is as + // follows: + // + // 123e4567-e89b-12d3-a456-42665544000 + // + // If you don't set this, a new UUID is created by APNs and returned in the + // response. + ApnsID string + + // A string which allows multiple notifications with the same collapse + // identifier to be displayed to the user as a single notification. The + // value should not exceed 64 bytes. + CollapseID string + + // A string containing hexadecimal bytes of the device token for the target + // device. + DeviceToken string + + // The topic of the remote notification, which is typically the bundle ID + // for your app. The certificate you create in the Apple Developer Member + // Center must include the capability for this topic. If your certificate + // includes multiple topics, you must specify a value for this header. If + // you omit this header and your APNs certificate does not specify multiple + // topics, the APNs server uses the certificate’s Subject as the default + // topic. + Topic string + + // An optional time at which the notification is no longer valid and can be + // discarded by APNs. If this value is in the past, APNs treats the + // notification as if it expires immediately and does not store the + // notification or attempt to redeliver it. If this value is left as the + // default (ie, Expiration.IsZero()) an expiration header will not added to + // the http request. + Expiration time.Time + + // The priority of the notification. Specify ether apns.PriorityHigh (10) or + // apns.PriorityLow (5) If you don't set this, the APNs server will set the + // priority to 10. + Priority int + + // A byte array containing the JSON-encoded payload of this push notification. 
+ // Refer to "The Remote Notification Payload" section in the Apple Local and + // Remote Notification Programming Guide for more info. + Payload interface{} + + // The pushtype of the push notification. If this values is left as the + // default an apns-push-type header with value 'alert' will be added to the + // http request. + PushType EPushType +} + +// MarshalJSON converts the notification payload to JSON. +func (n *Notification) MarshalJSON() ([]byte, error) { + switch payload := n.Payload.(type) { + case string: + return []byte(payload), nil + case []byte: + return payload, nil + default: + return json.Marshal(payload) + } +} diff --git a/vendor/github.com/sideshow/apns2/payload/builder.go b/vendor/github.com/sideshow/apns2/payload/builder.go new file mode 100644 index 0000000000..a2ff30da10 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/payload/builder.go @@ -0,0 +1,402 @@ +// Package payload is a helper package which contains a payload +// builder to make constructing notification payloads easier. +package payload + +import "encoding/json" + +// InterruptionLevel defines the value for the payload aps interruption-level +type EInterruptionLevel string + +const ( + // InterruptionLevelPassive is used to indicate that notification be delivered in a passive manner. + InterruptionLevelPassive EInterruptionLevel = "passive" + + // InterruptionLevelActive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelActive EInterruptionLevel = "active" + + // InterruptionLevelTimeSensitive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelTimeSensitive EInterruptionLevel = "time-sensitive" + + // InterruptionLevelCritical is used to indicate the importance and delivery timing of a notification. + // This interruption level requires an approved entitlement from Apple. 
+ // See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ + InterruptionLevelCritical EInterruptionLevel = "critical" +) + +// Payload represents a notification which holds the content that will be +// marshalled as JSON. +type Payload struct { + content map[string]interface{} +} + +type aps struct { + Alert interface{} `json:"alert,omitempty"` + Badge interface{} `json:"badge,omitempty"` + Category string `json:"category,omitempty"` + ContentAvailable int `json:"content-available,omitempty"` + InterruptionLevel EInterruptionLevel `json:"interruption-level,omitempty"` + MutableContent int `json:"mutable-content,omitempty"` + RelevanceScore interface{} `json:"relevance-score,omitempty"` + Sound interface{} `json:"sound,omitempty"` + ThreadID string `json:"thread-id,omitempty"` + URLArgs []string `json:"url-args,omitempty"` +} + +type alert struct { + Action string `json:"action,omitempty"` + ActionLocKey string `json:"action-loc-key,omitempty"` + Body string `json:"body,omitempty"` + LaunchImage string `json:"launch-image,omitempty"` + LocArgs []string `json:"loc-args,omitempty"` + LocKey string `json:"loc-key,omitempty"` + Title string `json:"title,omitempty"` + Subtitle string `json:"subtitle,omitempty"` + TitleLocArgs []string `json:"title-loc-args,omitempty"` + TitleLocKey string `json:"title-loc-key,omitempty"` + SummaryArg string `json:"summary-arg,omitempty"` + SummaryArgCount int `json:"summary-arg-count,omitempty"` +} + +type sound struct { + Critical int `json:"critical,omitempty"` + Name string `json:"name,omitempty"` + Volume float32 `json:"volume,omitempty"` +} + +// NewPayload returns a new Payload struct +func NewPayload() *Payload { + return &Payload{ + map[string]interface{}{ + "aps": &aps{}, + }, + } +} + +// Alert sets the aps alert on the payload. +// This will display a notification alert message to the user. 
+// +// {"aps":{"alert":alert}}` +func (p *Payload) Alert(alert interface{}) *Payload { + p.aps().Alert = alert + return p +} + +// Badge sets the aps badge on the payload. +// This will display a numeric badge on the app icon. +// +// {"aps":{"badge":b}} +func (p *Payload) Badge(b int) *Payload { + p.aps().Badge = b + return p +} + +// ZeroBadge sets the aps badge on the payload to 0. +// This will clear the badge on the app icon. +// +// {"aps":{"badge":0}} +func (p *Payload) ZeroBadge() *Payload { + p.aps().Badge = 0 + return p +} + +// UnsetBadge removes the badge attribute from the payload. +// This will leave the badge on the app icon unchanged. +// If you wish to clear the app icon badge, use ZeroBadge() instead. +// +// {"aps":{}} +func (p *Payload) UnsetBadge() *Payload { + p.aps().Badge = nil + return p +} + +// Sound sets the aps sound on the payload. +// This will play a sound from the app bundle, or the default sound otherwise. +// +// {"aps":{"sound":sound}} +func (p *Payload) Sound(sound interface{}) *Payload { + p.aps().Sound = sound + return p +} + +// ContentAvailable sets the aps content-available on the payload to 1. +// This will indicate to the app that there is new content available to download +// and launch the app in the background. +// +// {"aps":{"content-available":1}} +func (p *Payload) ContentAvailable() *Payload { + p.aps().ContentAvailable = 1 + return p +} + +// MutableContent sets the aps mutable-content on the payload to 1. +// This will indicate to the to the system to call your Notification Service +// extension to mutate or replace the notification's content. +// +// {"aps":{"mutable-content":1}} +func (p *Payload) MutableContent() *Payload { + p.aps().MutableContent = 1 + return p +} + +// Custom payload + +// Custom sets a custom key and value on the payload. +// This will add custom key/value data to the notification payload at root level. 
+// +// {"aps":{}, key:value} +func (p *Payload) Custom(key string, val interface{}) *Payload { + p.content[key] = val + return p +} + +// Alert dictionary + +// AlertTitle sets the aps alert title on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"title":title}}} +func (p *Payload) AlertTitle(title string) *Payload { + p.aps().alert().Title = title + return p +} + +// AlertTitleLocKey sets the aps alert title localization key on the payload. +// This is the key to a title string in the Localizable.strings file for the +// current localization. See Localized Formatted Strings in Apple documentation +// for more information. +// +// {"aps":{"alert":{"title-loc-key":key}}} +func (p *Payload) AlertTitleLocKey(key string) *Payload { + p.aps().alert().TitleLocKey = key + return p +} + +// AlertTitleLocArgs sets the aps alert title localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in title-loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"title-loc-args":args}}} +func (p *Payload) AlertTitleLocArgs(args []string) *Payload { + p.aps().alert().TitleLocArgs = args + return p +} + +// AlertSubtitle sets the aps alert subtitle on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"subtitle":"subtitle"}}} +func (p *Payload) AlertSubtitle(subtitle string) *Payload { + p.aps().alert().Subtitle = subtitle + return p +} + +// AlertBody sets the aps alert body on the payload. +// This is the text of the alert message. 
+// +// {"aps":{"alert":{"body":body}}} +func (p *Payload) AlertBody(body string) *Payload { + p.aps().alert().Body = body + return p +} + +// AlertLaunchImage sets the aps launch image on the payload. +// This is the filename of an image file in the app bundle. The image is used +// as the launch image when users tap the action button or move the action +// slider. +// +// {"aps":{"alert":{"launch-image":image}}} +func (p *Payload) AlertLaunchImage(image string) *Payload { + p.aps().alert().LaunchImage = image + return p +} + +// AlertLocArgs sets the aps alert localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-args":args}}} +func (p *Payload) AlertLocArgs(args []string) *Payload { + p.aps().alert().LocArgs = args + return p +} + +// AlertLocKey sets the aps alert localization key on the payload. +// This is the key to an alert-message string in the Localizable.strings file +// for the current localization. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-key":key}}} +func (p *Payload) AlertLocKey(key string) *Payload { + p.aps().alert().LocKey = key + return p +} + +// AlertAction sets the aps alert action on the payload. +// This is the label of the action button, if the user sets the notifications +// to appear as alerts. This label should be succinct, such as “Details” or +// “Read more”. If omitted, the default value is “Show”. +// +// {"aps":{"alert":{"action":action}}} +func (p *Payload) AlertAction(action string) *Payload { + p.aps().alert().Action = action + return p +} + +// AlertActionLocKey sets the aps alert action localization key on the payload. 
+// This is the the string used as a key to get a localized string in the current +// localization to use for the notfication right button’s title instead of +// “View”. See Localized Formatted Strings in Apple documentation for more +// information. +// +// {"aps":{"alert":{"action-loc-key":key}}} +func (p *Payload) AlertActionLocKey(key string) *Payload { + p.aps().alert().ActionLocKey = key + return p +} + +// AlertSummaryArg sets the aps alert summary arg key on the payload. +// This is the string that is used as a key to fill in an argument +// at the bottom of a notification to provide more context, such as +// a name associated with the sender of the notification. +// +// {"aps":{"alert":{"summary-arg":key}}} +func (p *Payload) AlertSummaryArg(key string) *Payload { + p.aps().alert().SummaryArg = key + return p +} + +// AlertSummaryArgCount sets the aps alert summary arg count key on the payload. +// This integer sets a custom "weight" on the notification, effectively +// allowing a notification to be viewed internally as two. For example if +// a notification encompasses 3 messages, you can set it to 3. +// +// {"aps":{"alert":{"summary-arg-count":key}}} +func (p *Payload) AlertSummaryArgCount(key int) *Payload { + p.aps().alert().SummaryArgCount = key + return p +} + +// General + +// Category sets the aps category on the payload. +// This is a string value that represents the identifier property of the +// UIMutableUserNotificationCategory object you created to define custom actions. +// +// {"aps":{"category":category}} +func (p *Payload) Category(category string) *Payload { + p.aps().Category = category + return p +} + +// Mdm sets the mdm on the payload. +// This is for Apple Mobile Device Management (mdm) payloads. +// +// {"aps":{}:"mdm":mdm} +func (p *Payload) Mdm(mdm string) *Payload { + p.content["mdm"] = mdm + return p +} + +// ThreadID sets the aps thread id on the payload. 
+// This is for the purpose of updating the contents of a View Controller in a +// Notification Content app extension when a new notification arrives. If a +// new notification arrives whose thread-id value matches the thread-id of the +// notification already being displayed, the didReceiveNotification method +// is called. +// +// {"aps":{"thread-id":id}} +func (p *Payload) ThreadID(threadID string) *Payload { + p.aps().ThreadID = threadID + return p +} + +// URLArgs sets the aps category on the payload. +// This specifies an array of values that are paired with the placeholders +// inside the urlFormatString value of your website.json file. +// See Apple Notification Programming Guide for Websites. +// +// {"aps":{"url-args":urlArgs}} +func (p *Payload) URLArgs(urlArgs []string) *Payload { + p.aps().URLArgs = urlArgs + return p +} + +// SoundName sets the name value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":name,"volume":1.0}}} +func (p *Payload) SoundName(name string) *Payload { + p.aps().sound().Name = name + return p +} + +// SoundVolume sets the volume value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":"default","volume":volume}}} +func (p *Payload) SoundVolume(volume float32) *Payload { + p.aps().sound().Volume = volume + return p +} + +// InterruptionLevel defines the value for the payload aps interruption-level +// This is to indicate the importance and delivery timing of a notification. +// (Using InterruptionLevelCritical requires an approved entitlement from Apple.) 
+// See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ +// +// {"aps":{"interruption-level":passive}} +func (p *Payload) InterruptionLevel(interruptionLevel EInterruptionLevel) *Payload { + p.aps().InterruptionLevel = interruptionLevel + return p +} + +// The relevance score, a number between 0 and 1, +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) RelevanceScore(b float32) *Payload { + p.aps().RelevanceScore = b + return p +} + +// Unsets the relevance score +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. 
+// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) UnsetRelevanceScore() *Payload { + p.aps().RelevanceScore = nil + return p +} + +// MarshalJSON returns the JSON encoded version of the Payload +func (p *Payload) MarshalJSON() ([]byte, error) { + return json.Marshal(p.content) +} + +func (p *Payload) aps() *aps { + return p.content["aps"].(*aps) +} + +func (a *aps) alert() *alert { + if _, ok := a.Alert.(*alert); !ok { + a.Alert = &alert{} + } + return a.Alert.(*alert) +} + +func (a *aps) sound() *sound { + if _, ok := a.Sound.(*sound); !ok { + a.Sound = &sound{Critical: 1, Name: "default", Volume: 1.0} + } + return a.Sound.(*sound) +} diff --git a/vendor/github.com/sideshow/apns2/response.go b/vendor/github.com/sideshow/apns2/response.go new file mode 100644 index 0000000000..99d6345634 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/response.go @@ -0,0 +1,156 @@ +package apns2 + +import ( + "net/http" + "strconv" + "time" +) + +// StatusSent is a 200 response. +const StatusSent = http.StatusOK + +// The possible Reason error codes returned from APNs. From table 4 in the +// Handling Notification Responses from APNs article +const ( + // 400 The collapse identifier exceeds the maximum allowed size + ReasonBadCollapseID = "BadCollapseId" + + // 400 The specified device token was bad. Verify that the request contains a + // valid token and that the token matches the environment. + ReasonBadDeviceToken = "BadDeviceToken" + + // 400 The apns-expiration value is bad. + ReasonBadExpirationDate = "BadExpirationDate" + + // 400 The apns-id value is bad. + ReasonBadMessageID = "BadMessageId" + + // 400 The apns-priority value is bad. + ReasonBadPriority = "BadPriority" + + // 400 The apns-topic was invalid. + ReasonBadTopic = "BadTopic" + + // 400 The device token does not match the specified topic. + ReasonDeviceTokenNotForTopic = "DeviceTokenNotForTopic" + + // 400 One or more headers were repeated. 
+ ReasonDuplicateHeaders = "DuplicateHeaders" + + // 400 Idle time out. + ReasonIdleTimeout = "IdleTimeout" + + // 400 The apns-push-type value is invalid. + ReasonInvalidPushType = "InvalidPushType" + + // 400 The device token is not specified in the request :path. Verify that the + // :path header contains the device token. + ReasonMissingDeviceToken = "MissingDeviceToken" + + // 400 The apns-topic header of the request was not specified and was + // required. The apns-topic header is mandatory when the client is connected + // using a certificate that supports multiple topics. + ReasonMissingTopic = "MissingTopic" + + // 400 The message payload was empty. + ReasonPayloadEmpty = "PayloadEmpty" + + // 400 Pushing to this topic is not allowed. + ReasonTopicDisallowed = "TopicDisallowed" + + // 403 The certificate was bad. + ReasonBadCertificate = "BadCertificate" + + // 403 The client certificate was for the wrong environment. + ReasonBadCertificateEnvironment = "BadCertificateEnvironment" + + // 403 The provider token is stale and a new token should be generated. + ReasonExpiredProviderToken = "ExpiredProviderToken" + + // 403 The specified action is not allowed. + ReasonForbidden = "Forbidden" + + // 403 The provider token is not valid or the token signature could not be + // verified. + ReasonInvalidProviderToken = "InvalidProviderToken" + + // 403 No provider certificate was used to connect to APNs and Authorization + // header was missing or no provider token was specified. + ReasonMissingProviderToken = "MissingProviderToken" + + // 404 The request contained a bad :path value. + ReasonBadPath = "BadPath" + + // 405 The specified :method was not POST. + ReasonMethodNotAllowed = "MethodNotAllowed" + + // 410 The device token is inactive for the specified topic. + ReasonUnregistered = "Unregistered" + + // 413 The message payload was too large. 
See Creating the Remote Notification + // Payload in the Apple Local and Remote Notification Programming Guide for + // details on maximum payload size. + ReasonPayloadTooLarge = "PayloadTooLarge" + + // 429 The provider token is being updated too often. + ReasonTooManyProviderTokenUpdates = "TooManyProviderTokenUpdates" + + // 429 Too many requests were made consecutively to the same device token. + ReasonTooManyRequests = "TooManyRequests" + + // 500 An internal server error occurred. + ReasonInternalServerError = "InternalServerError" + + // 503 The service is unavailable. + ReasonServiceUnavailable = "ServiceUnavailable" + + // 503 The server is shutting down. + ReasonShutdown = "Shutdown" +) + +// Response represents a result from the APNs gateway indicating whether a +// notification was accepted or rejected and (if applicable) the metadata +// surrounding the rejection. +type Response struct { + + // The HTTP status code returned by APNs. + // A 200 value indicates that the notification was successfully sent. + // For a list of other possible status codes, see table 6-4 in the Apple Local + // and Remote Notification Programming Guide. + StatusCode int + + // The APNs error string indicating the reason for the notification failure (if + // any). The error code is specified as a string. For a list of possible + // values, see the Reason constants above. + // If the notification was accepted, this value will be "". + Reason string + + // The APNs ApnsID value from the Notification. If you didn't set an ApnsID on the + // Notification, this will be a new unique UUID which has been created by APNs. + ApnsID string + + // If the value of StatusCode is 410, this is the last time at which APNs + // confirmed that the device token was no longer valid for the topic. + Timestamp Time +} + +// Sent returns whether or not the notification was successfully sent. +// This is the same as checking if the StatusCode == 200. 
+func (c *Response) Sent() bool { + return c.StatusCode == StatusSent +} + +// Time represents a device uninstall time +type Time struct { + time.Time +} + +// UnmarshalJSON converts an epoch date into a Time struct. +func (t *Time) UnmarshalJSON(b []byte) error { + ts, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(ts/1000, 0) + return nil +} diff --git a/vendor/github.com/sideshow/apns2/token/token.go b/vendor/github.com/sideshow/apns2/token/token.go new file mode 100644 index 0000000000..26fec563dd --- /dev/null +++ b/vendor/github.com/sideshow/apns2/token/token.go @@ -0,0 +1,107 @@ +package token + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "sync" + "time" + + "github.com/golang-jwt/jwt/v4" +) + +const ( + // TokenTimeout is the period of time in seconds that a token is valid for. + // If the timestamp for token issue is not within the last hour, APNs + // rejects subsequent push messages. This is set to under an hour so that + // we generate a new token before the existing one expires. + TokenTimeout = 3000 +) + +// Possible errors when parsing a .p8 file. +var ( + ErrAuthKeyNotPem = errors.New("token: AuthKey must be a valid .p8 PEM file") + ErrAuthKeyNotECDSA = errors.New("token: AuthKey must be of type ecdsa.PrivateKey") + ErrAuthKeyNil = errors.New("token: AuthKey was nil") +) + +// Token represents an Apple Provider Authentication Token (JSON Web Token). +type Token struct { + sync.Mutex + AuthKey *ecdsa.PrivateKey + KeyID string + TeamID string + IssuedAt int64 + Bearer string +} + +// AuthKeyFromFile loads a .p8 certificate from a local file and returns a +// *ecdsa.PrivateKey. 
+func AuthKeyFromFile(filename string) (*ecdsa.PrivateKey, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return AuthKeyFromBytes(bytes) +} + +// AuthKeyFromBytes loads a .p8 certificate from an in memory byte array and +// returns an *ecdsa.PrivateKey. +func AuthKeyFromBytes(bytes []byte) (*ecdsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, ErrAuthKeyNotPem + } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + if pk, ok := key.(*ecdsa.PrivateKey); ok { + return pk, nil + } + return nil, ErrAuthKeyNotECDSA +} + +// GenerateIfExpired checks to see if the token is about to expire and +// generates a new token. +func (t *Token) GenerateIfExpired() (bearer string) { + t.Lock() + defer t.Unlock() + if t.Expired() { + t.Generate() + } + return t.Bearer +} + +// Expired checks to see if the token has expired. +func (t *Token) Expired() bool { + return time.Now().Unix() >= (t.IssuedAt + TokenTimeout) +} + +// Generate creates a new token. 
+func (t *Token) Generate() (bool, error) { + if t.AuthKey == nil { + return false, ErrAuthKeyNil + } + issuedAt := time.Now().Unix() + jwtToken := &jwt.Token{ + Header: map[string]interface{}{ + "alg": "ES256", + "kid": t.KeyID, + }, + Claims: jwt.MapClaims{ + "iss": t.TeamID, + "iat": issuedAt, + }, + Method: jwt.SigningMethodES256, + } + bearer, err := jwtToken.SignedString(t.AuthKey) + if err != nil { + return false, err + } + t.IssuedAt = issuedAt + t.Bearer = bearer + return true, nil +} diff --git a/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go new file mode 100644 index 0000000000..83ad151e31 --- /dev/null +++ b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go @@ -0,0 +1,224 @@ +package asyncevents + +import ( + "context" + "errors" + "fmt" + "log/slog" + "math" + "time" + + "github.com/IBM/sarama" +) + +// SaramaEventsConsumer consumes Kafka messages for asynchronous event +// handling. +type SaramaEventsConsumer struct { + Handler sarama.ConsumerGroupHandler + ConsumerGroup sarama.ConsumerGroup + Topics []string +} + +func NewSaramaEventsConsumer(consumerGroup sarama.ConsumerGroup, + handler sarama.ConsumerGroupHandler, topics ...string) *SaramaEventsConsumer { + + return &SaramaEventsConsumer{ + ConsumerGroup: consumerGroup, + Handler: handler, + Topics: topics, + } +} + +// Run the consumer, to begin consuming Kafka messages. +// +// Run is stopped by its context being canceled. When its context is canceled, +// it returns nil. +func (p *SaramaEventsConsumer) Run(ctx context.Context) (err error) { + for { + err := p.ConsumerGroup.Consume(ctx, p.Topics, p.Handler) + if err != nil { + return err + } + if ctxErr := ctx.Err(); ctxErr != nil { + return nil + } + } +} + +// SaramaConsumerGroupHandler implements sarama.ConsumerGroupHandler. 
+type SaramaConsumerGroupHandler struct { + Consumer SaramaMessageConsumer + ConsumerTimeout time.Duration + Logger Logger +} + +// NewSaramaConsumerGroupHandler builds a consumer group handler. +// +// A timeout of 0 will use DefaultMessageConsumptionTimeout. +func NewSaramaConsumerGroupHandler(logger Logger, consumer SaramaMessageConsumer, + timeout time.Duration) *SaramaConsumerGroupHandler { + + if timeout == 0 { + timeout = DefaultMessageConsumptionTimeout + } + if logger == nil { + logger = slog.Default() + } + return &SaramaConsumerGroupHandler{ + Consumer: consumer, + ConsumerTimeout: timeout, + Logger: logger, + } +} + +const ( + // DefaultMessageConsumptionTimeout is the default time to allow + // SaramaMessageConsumer.Consume to work before canceling. + DefaultMessageConsumptionTimeout = 30 * time.Second +) + +// Setup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } + +// Cleanup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } + +// ConsumeClaim implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, + claim sarama.ConsumerGroupClaim) error { + + done := session.Context().Done() + for { + select { + case <-done: + return nil + case message, more := <-claim.Messages(): + if !more { + return nil + } + err := func() error { + ctx, cancel := context.WithTimeout(session.Context(), h.ConsumerTimeout) + defer cancel() + return h.Consumer.Consume(ctx, session, message) + }() + switch { + case errors.Is(err, context.DeadlineExceeded): + h.Logger.Log(session.Context(), slog.LevelDebug, err.Error()) + case !errors.Is(err, nil): + return err + } + } + } +} + +// Close implements sarama.ConsumerGroupHandler. 
+func (h *SaramaConsumerGroupHandler) Close() error { return nil } + +// SaramaMessageConsumer processes Kafka messages. +type SaramaMessageConsumer interface { + // Consume should process a message. + // + // Consume is responsible for marking the message consumed, unless the + // context is canceled, in which case the caller should retry, or mark the + // message as appropriate. + Consume(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error +} + +var ErrRetriesLimitExceeded = errors.New("retry limit exceeded") + +// NTimesRetryingConsumer enhances a SaramaMessageConsumer with a finite +// number of immediate retries. +// +// The delay between each retry can be controlled via the Delay property. If +// no Delay property is specified, a delay based on the Fibonacci sequence is +// used. +// +// Logger is intentionally minimal. The slog.Log function is used by default. +type NTimesRetryingConsumer struct { + Times int + Consumer SaramaMessageConsumer + Delay func(tries int) time.Duration + Logger Logger +} + +// Logger is an intentionally minimal interface for basic logging. +// +// It matches the signature of slog.Log. 
+type Logger interface { + Log(ctx context.Context, level slog.Level, msg string, args ...any) +} + +func (c *NTimesRetryingConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) (err error) { + + var joinedErrors error + var tries int = 0 + var delay time.Duration = 0 + if c.Delay == nil { + c.Delay = DelayFibonacci + } + if c.Logger == nil { + c.Logger = slog.Default() + } + done := ctx.Done() + for tries < c.Times { + select { + case <-done: + return nil + case <-time.After(delay): + err := c.Consumer.Consume(ctx, session, message) + if errors.Is(err, nil) || errors.Is(err, context.Canceled) { + return nil + } + delay = c.Delay(tries) + c.Logger.Log(ctx, slog.LevelInfo, "failure consuming Kafka message, will retry", + slog.Attr{Key: "tries", Value: slog.IntValue(tries)}, + slog.Attr{Key: "times", Value: slog.IntValue(c.Times)}, + slog.Attr{Key: "delay", Value: slog.DurationValue(delay)}, + slog.Attr{Key: "err", Value: slog.AnyValue(err)}, + ) + joinedErrors = errors.Join(joinedErrors, err) + tries++ + } + } + + return errors.Join(joinedErrors, c.retryLimitError()) +} + +func (c *NTimesRetryingConsumer) retryLimitError() error { + return fmt.Errorf("%w (%d)", ErrRetriesLimitExceeded, c.Times) +} + +// DelayNone is a function returning a constant "no delay" of 0 seconds. +var DelayNone = func(_ int) time.Duration { return DelayConstant(0) } + +// DelayConstant is a function returning a constant number of seconds. +func DelayConstant(n int) time.Duration { return time.Duration(n) * time.Second } + +// DelayExponentialBinary returns a binary exponential delay. +// +// The delay is 2**tries seconds. +func DelayExponentialBinary(tries int) time.Duration { + return time.Second * time.Duration(math.Pow(2, float64(tries))) +} + +// DelayFibonacci returns a delay based on the Fibonacci sequence. 
+func DelayFibonacci(tries int) time.Duration { + return time.Second * time.Duration(Fib(tries)) +} + +// Fib returns the nth number in the Fibonacci sequence. +func Fib(n int) int { + if n == 0 { + return 0 + } else if n < 3 { + return 1 + } + + n1, n2 := 1, 1 + for i := 3; i <= n; i++ { + n1, n2 = n1+n2, n1 + } + + return n1 +} diff --git a/vendor/github.com/tidepool-org/go-common/events/config.go b/vendor/github.com/tidepool-org/go-common/events/config.go index a07d70ed6e..5deff14d06 100644 --- a/vendor/github.com/tidepool-org/go-common/events/config.go +++ b/vendor/github.com/tidepool-org/go-common/events/config.go @@ -2,6 +2,7 @@ package events import ( "errors" + "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" ) diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go index d1911c2e86..7d9281e025 100644 --- a/vendor/golang.org/x/crypto/md4/md4.go +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -7,7 +7,7 @@ // Deprecated: MD4 is cryptographically broken and should only be used // where compatibility with legacy systems, not security, is the goal. Instead, // use a secure hash like SHA-256 (from crypto/sha256). -package md4 // import "golang.org/x/crypto/md4" +package md4 import ( "crypto" diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index bf2259537d..e6c645e7ce 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -5,7 +5,7 @@ // Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses // are signed messages attesting to the validity of a certificate for a small // period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" +package ocsp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e01d..28cd99c7f3 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. 
*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index c971a99fa6..76fa40fb20 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -5,7 +5,7 @@ // Package scrypt implements the scrypt key derivation function as defined in // Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard // Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" +package scrypt import ( "crypto/sha256" diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab176..885c4c5936 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... 
} - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go index c484e5a94f..bca3ae9a0c 100644 --- a/vendor/golang.org/x/net/html/doctype.go +++ b/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc42..e8515d8e88 100644 --- a/vendor/golang.org/x/net/html/foreign.go +++ b/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 0000000000..54be8fd30f --- /dev/null +++ b/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. +// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. 
+func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 1350eef22c..77741a1950 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. 
+func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 0000000000..5b516c55ff --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 0000000000..060fd6c64c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
+ +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279c..81faec7e75 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f30..c7601c909f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -33,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -49,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -140,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return 
nil } @@ -149,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -237,13 +248,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +287,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +305,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. 
+ conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3ec6..b55547aec6 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. 
+ // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -336,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -353,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -440,13 +429,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +447,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +485,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if 
s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +526,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -569,7 +563,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +603,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +623,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +634,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the 
scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +656,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +920,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -935,20 +932,24 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, 
uint32(sc.srv.initialStreamRecvWindowSize())}, - }, + write: settings, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +969,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +994,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1026,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1050,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. 
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1066,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1106,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1359,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1637,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." 
return nil @@ -1757,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2160,7 +2211,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2187,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2215,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := 
sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2241,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2282,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { @@ -2855,6 +2916,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3301,7 +3367,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 98a49c6b6e..b2e2ed3373 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -203,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -227,40 +240,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -296,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -308,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -339,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -350,31 +368,55 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our 
conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. 
+ // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -432,12 +474,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -499,6 +541,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +551,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { 
return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -554,6 +584,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -586,7 +618,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. 
+ case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -597,7 +636,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -622,6 +661,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -640,9 +695,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -758,44 +814,38 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
- streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + seenSettingsChan: make(chan struct{}), + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,30 +857,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. 
cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -838,11 +883,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -852,8 +895,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) 
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -871,7 +914,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -999,7 +1042,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1031,16 +1074,40 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. 
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1053,7 +1120,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1091,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? 
Just Close: cc.mu.Unlock() @@ -1415,6 +1483,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1430,12 +1500,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1617,6 +1706,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1641,16 +1731,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight 
request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1672,12 +1790,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. 
+ return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1949,7 +2072,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -1965,6 +2088,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1985,7 +2112,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2022,7 +2149,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2203,7 +2330,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2219,10 +2346,10 @@ func (cc *ClientConn) 
forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2282,7 +2409,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2306,6 +2432,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. 
+ unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2349,7 +2496,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) @@ -2363,7 +2510,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2409,13 +2556,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this @@ -2533,15 +2683,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2725,7 +2894,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2860,9 +3029,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. 
+ rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2956,6 +3138,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2973,6 +3170,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2981,7 +3179,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3010,7 +3208,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3085,6 +3283,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3107,13 +3311,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3267,7 +3478,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 0000000000..b2de211613 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. 
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a1..6ff6bee7e9 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e12..6e08a76a71 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. 
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab8..7ca4fa12aa 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. 
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. 
For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b6..6ab02b6c31 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -157,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . #if defined(__powerpc__) @@ -255,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -526,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -551,6 +564,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -654,7 +668,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +678,7 @@ echo '#include ' | $CC -x c - -E 
-dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529d..3a5e776f89 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2d..6f15ba1eaf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d..099867deed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c..be8c002070 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8e..a6a2d2fc2b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" 
+import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e2628a..230a94549a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err 
error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2592,3 +2654,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75e..745e5c7e6c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451f..dd2262a407 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a288944..8cf3670bda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c71a..b86ded549c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7..4e92e5aa40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1d..7bf5c04bb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, 
length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled 
{ + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. + if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 0000000000..07ac8e09d1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 0000000000..297e97bce9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa85245..d73c4652e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c05..4a55a40058 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b479..6ebc48b3fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -457,6 +460,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -488,12 +492,14 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ 
-928,6 +934,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -941,9 +948,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1705,6 +1710,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1786,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1797,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1861,6 +1870,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a 
MAP_PRIVATE = 0x2 @@ -1908,6 +1930,8 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2173,7 +2197,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2342,9 +2366,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2417,6 +2443,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2498,6 +2525,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2589,6 +2633,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS 
= 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2902,15 +2968,17 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 @@ -3179,6 +3247,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3192,8 +3261,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3592,6 +3663,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57c..c0d45e3205 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 
@@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -151,9 +154,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -276,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -314,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afdb..c731d24f02 
100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -151,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -277,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -315,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac3..680018a4a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -282,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + 
SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b0..a63909f308 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -110,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -152,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -198,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -233,6 +242,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 
PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -273,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -311,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdcb..9b0a2573fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -152,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -231,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + 
PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -269,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -307,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb4..958e6e0645 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 
0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba78..50c7f25bd1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 
0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0da..ced21d66d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 
0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1b..226c044190 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 
0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3af..3122737cd4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE 
= 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -330,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -368,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281b..eb5d3467ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 
EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -372,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e015033..e921ebc60b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -372,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9f..38ba81c55c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -266,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -304,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a7..71f0400977 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -338,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e 
SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -376,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d901..c44a313322 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -110,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -153,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe 
PTRACE_GETFPREGS64 = 0x19 @@ -329,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -415,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3d..1ec2b1407b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a..24b346e1a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, 
from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 
unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb28402..ebd213100b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT 
libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b5..824b9c2d5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab7..4f178a2293 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL 
·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1d..5cc1e8eb2f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - 
r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2222,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b7..1851df14e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := 
syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b5617316..0b43c69365 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751cd..e1ec0dbe4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover 
*Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f6..880c6d6e31 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776db..7c8452a63e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func 
Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f90..b8ef95b0fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072f..2ffdf861f7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d532121..2af3b5c762 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67e..1da08d5267 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62c..b7a251353b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c8..6e85b0aac9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06d..f15dadf055 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed584..28b487df25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4b..1e7f321e43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc58..524b0820cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763d..f485dbf456 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 
+380,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c747706131..70b35bf3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6d..1893e2fe88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346cf..16a4017da0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018da..7e567f1eff 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 
4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b81..38ae55e5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca7a..55e92e60a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39f..60658d6a02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc435..e203e8a7ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b44..5944b97d54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( 
SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357d..c66d416dad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f7..a5459e766f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad50..01d86825bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d741..7b703e77cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go 
b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3a..17c53bd9b3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74d..2392226a74 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx 
RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3a..51e13eb055 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee77..d002d8ef3c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8b..3f863d898d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acfd..61c7293106 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c7900..b5d17414f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 
POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b83485..5537148dcb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -515,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 
0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1723,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1767,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1796,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1828,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1896,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1948,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2485,7 +2522,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -2557,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3473,7 +3510,7 @@ const ( 
DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3504,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3765,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,12 +3842,15 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -3947,7 +3987,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4015,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -3991,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 
ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4078,6 +4118,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw 
PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4259,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4605,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5351,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc04142..ad05b51a60 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 
+ RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af468..2e5d5a4435 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba6..3ca814f54d 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. 
func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a..b6e1ab76f8 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union 
inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3c..4a32543868 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -166,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, 
collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -211,6 +215,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,6 +315,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp 
uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -715,20 +727,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -884,6 +888,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext 
unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1368,9 +1377,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } @@ -1673,13 +1684,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0a..9d138de5fe 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -1060,6 +1061,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2003,7 +2005,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2033,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// 
Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -2144,6 +2204,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. 
+type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. 
+type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. @@ -3404,3 +3590,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5..01c0716c2c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -180,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = 
modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -246,7 +252,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -272,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -346,8 +356,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") 
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +489,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -788,6 +804,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -1589,6 +1613,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + if r0 != 0 { + errcode = 
syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1621,6 +1653,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) 
{ r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2149,6 +2221,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2157,6 +2238,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -2358,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) 
if r1 == 0 { @@ -2374,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { @@ -3025,6 +3131,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3041,6 +3155,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { @@ -4073,6 +4195,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4106,6 +4234,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) 
(hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4115,6 +4252,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/term/LICENSE +++ b/vendor/golang.org/x/term/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md index d03d0aefef..05ff623f94 100644 --- a/vendor/golang.org/x/term/README.md +++ b/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. 
diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f560604..df6bf948e1 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index 717a17d095..0da254b537 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,7 @@ # github.com/IBM/sarama v1.43.2 ## explicit; go 1.19 github.com/IBM/sarama +github.com/IBM/sarama/mocks # github.com/ant0ine/go-json-rest v3.3.2+incompatible ## explicit github.com/ant0ine/go-json-rest/rest @@ -360,14 +361,20 @@ github.com/rinchsan/device-check-go # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/sideshow/apns2 v0.23.0 +## explicit; go 1.15 +github.com/sideshow/apns2 +github.com/sideshow/apns2/payload +github.com/sideshow/apns2/token # github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 ## explicit; go 1.22 github.com/tidepool-org/clinic/client # github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 ## explicit; go 1.22 github.com/tidepool-org/devices/api -# github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c +# github.com/tidepool-org/go-common v0.12.2 ## explicit; go 1.22 +github.com/tidepool-org/go-common/asyncevents github.com/tidepool-org/go-common/clients github.com/tidepool-org/go-common/clients/disc github.com/tidepool-org/go-common/clients/hakken @@ -482,8 +489,8 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.24.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.32.0 +## explicit; go 1.20 golang.org/x/crypto/md4 golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 @@ -503,7 +510,7 @@ golang.org/x/lint/golint golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.26.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -522,20 +529,20 @@ golang.org/x/net/trace golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sync v0.7.0 +# 
golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.21.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.21.0 +# golang.org/x/term v0.28.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.16.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap