Merge pull request #6 from zdam-egzamin-zawodowy/query-complexity

query complexity
Dawid Wysokiński 2021-03-24 06:23:32 +01:00 committed by GitHub
commit 48d2adb7ae
10 changed files with 437 additions and 127 deletions

View File

@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package dataloader
import (
"sync"
"time"
"github.com/zdam-egzamin-zawodowy/backend/internal/models"
)
// QualificationSliceByProfessionIDLoaderConfig captures the config to create a new QualificationSliceByProfessionIDLoader
type QualificationSliceByProfessionIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([][]*models.Qualification, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewQualificationSliceByProfessionIDLoader creates a new QualificationSliceByProfessionIDLoader given a fetch, wait, and maxBatch
func NewQualificationSliceByProfessionIDLoader(config QualificationSliceByProfessionIDLoaderConfig) *QualificationSliceByProfessionIDLoader {
return &QualificationSliceByProfessionIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// QualificationSliceByProfessionIDLoader batches and caches requests
type QualificationSliceByProfessionIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([][]*models.Qualification, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int][]*models.Qualification
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *qualificationSliceByProfessionIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type qualificationSliceByProfessionIDLoaderBatch struct {
keys []int
data [][]*models.Qualification
error []error
closing bool
done chan struct{}
}
// Load a Qualification by key, batching and caching will be applied automatically
func (l *QualificationSliceByProfessionIDLoader) Load(key int) ([]*models.Qualification, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Qualification.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *QualificationSliceByProfessionIDLoader) LoadThunk(key int) func() ([]*models.Qualification, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ([]*models.Qualification, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &qualificationSliceByProfessionIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ([]*models.Qualification, error) {
<-batch.done
var data []*models.Qualification
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *QualificationSliceByProfessionIDLoader) LoadAll(keys []int) ([][]*models.Qualification, []error) {
results := make([]func() ([]*models.Qualification, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
qualifications := make([][]*models.Qualification, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
qualifications[i], errors[i] = thunk()
}
return qualifications, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Qualifications.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *QualificationSliceByProfessionIDLoader) LoadAllThunk(keys []int) func() ([][]*models.Qualification, []error) {
results := make([]func() ([]*models.Qualification, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([][]*models.Qualification, []error) {
qualifications := make([][]*models.Qualification, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
qualifications[i], errors[i] = thunk()
}
return qualifications, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *QualificationSliceByProfessionIDLoader) Prime(key int, value []*models.Qualification) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make([]*models.Qualification, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *QualificationSliceByProfessionIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *QualificationSliceByProfessionIDLoader) unsafeSet(key int, value []*models.Qualification) {
if l.cache == nil {
l.cache = map[int][]*models.Qualification{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *qualificationSliceByProfessionIDLoaderBatch) keyIndex(l *QualificationSliceByProfessionIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *qualificationSliceByProfessionIDLoaderBatch) startTimer(l *QualificationSliceByProfessionIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *qualificationSliceByProfessionIDLoaderBatch) end(l *QualificationSliceByProfessionIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
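
For orientation (not part of the diff): a minimal sketch of how this generated loader is meant to be wired up. The fetchGrouped callback and the newExampleLoader helper are hypothetical; the constructor, config fields, and Load come from the generated file above.

package dataloader

import (
	"time"

	"github.com/zdam-egzamin-zawodowy/backend/internal/models"
)

// newExampleLoader shows the intended wiring: resolvers call Load(professionID),
// and every key collected within Wait is sent to Fetch as a single batch.
func newExampleLoader(fetchGrouped func(professionIDs []int) ([][]*models.Qualification, []error)) *QualificationSliceByProfessionIDLoader {
	return NewQualificationSliceByProfessionIDLoader(QualificationSliceByProfessionIDLoaderConfig{
		Wait:     2 * time.Millisecond, // collect keys for up to 2ms before firing the batch
		MaxBatch: 100,                  // cap a batch at 100 keys (0 = no limit)
		Fetch:    fetchGrouped,         // must return one result slice and one error slot per key, in key order
	})
}

A resolver would then call loader.Load(profession.ID) (again hypothetical) and get back only that profession's qualifications, while the underlying fetch runs once per batch.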

View File

@ -2,7 +2,7 @@ package httpdelivery
import (
"fmt"
"github.com/zdam-egzamin-zawodowy/backend/internal/models"
"github.com/zdam-egzamin-zawodowy/backend/internal/graphql/querycomplexity"
"time"
"github.com/99designs/gqlgen/graphql/handler"
@ -21,7 +21,6 @@ const (
playgroundTTL = time.Hour / time.Second
graphqlEndpoint = "/graphql"
playgroundEndpoint = "/"
complexityLimit = 1000
)
type Config struct {
@ -55,8 +54,8 @@ func graphqlHandler(cfg generated.Config) gin.HandlerFunc {
srv.Use(extension.AutomaticPersistedQuery{
Cache: lru.New(100),
})
srv.SetQueryCache(lru.New(1000))
srv.Use(extension.FixedComplexityLimit(complexityLimit))
srv.SetQueryCache(lru.New(100))
srv.Use(querycomplexity.GetComplexityLimitExtension())
if mode.Get() == mode.DevelopmentMode {
srv.Use(extension.Introspection{})
}
@ -80,122 +79,9 @@ func playgroundHandler() gin.HandlerFunc {
func prepareConfig(r *resolvers.Resolver, d *directive.Directive) generated.Config {
cfg := generated.Config{
Resolvers: r,
Complexity: getComplexityRoot(),
Complexity: querycomplexity.GetComplexityRoot(),
}
cfg.Directives.Authenticated = d.Authenticated
cfg.Directives.HasRole = d.HasRole
return cfg
}
func getComplexityRoot() generated.ComplexityRoot {
complexityRoot := generated.ComplexityRoot{}
complexityRoot.Query.GenerateTest = func(childComplexity int, qualificationIDs []int, limit *int) int {
return 300 + childComplexity
}
complexityRoot.Query.Professions = func(
childComplexity int,
filter *models.ProfessionFilter,
limit *int,
offset *int,
sort []string,
) int {
return 200 + childComplexity
}
complexityRoot.Query.Qualifications = func(
childComplexity int,
filter *models.QualificationFilter,
limit *int,
offset *int,
sort []string,
) int {
return 200 + childComplexity
}
complexityRoot.Query.Questions = func(
childComplexity int,
filter *models.QuestionFilter,
limit *int,
offset *int,
sort []string,
) int {
return 200 + childComplexity
}
complexityRoot.Query.Users = func(
childComplexity int,
filter *models.UserFilter,
limit *int,
offset *int,
sort []string,
) int {
return 200 + childComplexity
}
complexityRoot.Mutation.CreateProfession = func(childComplexity int, input models.ProfessionInput) int {
return 200 + childComplexity
}
complexityRoot.Mutation.CreateQualification = func(
childComplexity int,
input models.QualificationInput,
) int {
return 200 + childComplexity
}
complexityRoot.Mutation.CreateQuestion = func(childComplexity int, input models.QuestionInput) int {
return 400 + childComplexity
}
complexityRoot.Mutation.CreateUser = func(childComplexity int, input models.UserInput) int {
return 200 + childComplexity
}
complexityRoot.Mutation.SignIn = func(
childComplexity int,
email string,
password string,
staySignedIn *bool,
) int {
return 400 + childComplexity
}
complexityRoot.Mutation.DeleteProfessions = func(childComplexity int, ids []int) int {
return 200 + childComplexity
}
complexityRoot.Mutation.DeleteQualifications = func(childComplexity int, ids []int) int {
return 200 + childComplexity
}
complexityRoot.Mutation.DeleteQuestions = func(childComplexity int, ids []int) int {
return 400 + childComplexity
}
complexityRoot.Mutation.DeleteUsers = func(childComplexity int, ids []int) int {
return 200 + childComplexity
}
complexityRoot.Mutation.UpdateManyUsers = func(
childComplexity int,
ids []int,
input models.UserInput,
) int {
return 200 + childComplexity
}
complexityRoot.Mutation.UpdateProfession = func(
childComplexity int,
id int,
input models.ProfessionInput,
) int {
return 200 + childComplexity
}
complexityRoot.Mutation.UpdateQualification = func(
childComplexity int,
id int,
input models.QualificationInput,
) int {
return 200 + childComplexity
}
complexityRoot.Mutation.UpdateQuestion = func(
childComplexity int,
id int,
input models.QuestionInput,
) int {
return 400 + childComplexity
}
complexityRoot.Mutation.UpdateUser = func(childComplexity int, id int, input models.UserInput) int {
return 200 + childComplexity
}
complexityRoot.Profession.Qualifications = func(childComplexity int) int {
return 50 + childComplexity
}
return complexityRoot
}

View File

@ -0,0 +1,199 @@
package querycomplexity
import (
"github.com/99designs/gqlgen/graphql/handler/extension"
"github.com/zdam-egzamin-zawodowy/backend/internal/graphql/generated"
"github.com/zdam-egzamin-zawodowy/backend/internal/models"
"github.com/zdam-egzamin-zawodowy/backend/internal/profession"
"github.com/zdam-egzamin-zawodowy/backend/internal/qualification"
"github.com/zdam-egzamin-zawodowy/backend/internal/question"
"github.com/zdam-egzamin-zawodowy/backend/internal/user"
"github.com/zdam-egzamin-zawodowy/backend/pkg/utils"
)
const (
complexityLimit = 10000
countComplexity = 1000
professionsTotalFieldComplexity = 100
qualificationsTotalFieldComplexity = 100
questionsTotalFieldComplexity = 300
usersTotalFieldComplexity = 50
)
func GetComplexityLimitExtension() *extension.ComplexityLimit {
return extension.FixedComplexityLimit(complexityLimit)
}
func GetComplexityRoot() generated.ComplexityRoot {
complexityRoot := generated.ComplexityRoot{}
complexityRoot.Profession.Qualifications = func(childComplexity int) int {
return 10 + childComplexity
}
complexityRoot.ProfessionList.Total = getCountComplexity
complexityRoot.Query.Professions = func(
childComplexity int,
filter *models.ProfessionFilter,
limit *int,
offset *int,
sort []string,
) int {
return computeComplexity(
childComplexity,
utils.SafeIntPointer(limit, profession.FetchDefaultLimit),
professionsTotalFieldComplexity,
1,
)
}
complexityRoot.QualificationList.Total = getCountComplexity
complexityRoot.Query.Qualifications = func(
childComplexity int,
filter *models.QualificationFilter,
limit *int,
offset *int,
sort []string,
) int {
return computeComplexity(
childComplexity,
utils.SafeIntPointer(limit, qualification.FetchDefaultLimit),
qualificationsTotalFieldComplexity,
1,
)
}
complexityRoot.QuestionList.Total = getCountComplexity
complexityRoot.Query.Questions = func(
childComplexity int,
filter *models.QuestionFilter,
limit *int,
offset *int,
sort []string,
) int {
return computeComplexity(
childComplexity,
utils.SafeIntPointer(limit, question.FetchDefaultLimit),
questionsTotalFieldComplexity,
1,
)
}
complexityRoot.Query.GenerateTest = func(childComplexity int, qualificationIDs []int, limit *int) int {
return computeComplexity(
childComplexity,
utils.SafeIntPointer(limit, question.TestMaxLimit),
0,
3,
)
}
complexityRoot.UserList.Total = getCountComplexity
complexityRoot.Query.Users = func(
childComplexity int,
filter *models.UserFilter,
limit *int,
offset *int,
sort []string,
) int {
return computeComplexity(
childComplexity,
utils.SafeIntPointer(limit, user.FetchMaxLimit),
usersTotalFieldComplexity,
1,
)
}
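// mutations are priced as fixed fractions of complexityLimit, so a single
// request fits only a handful of them (e.g. at complexityLimit/5 each, at
// most five such mutations before the limit is exceeded)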
complexityRoot.Mutation.CreateProfession = func(childComplexity int, input models.ProfessionInput) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.CreateQualification = func(
childComplexity int,
input models.QualificationInput,
) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.CreateQuestion = func(childComplexity int, input models.QuestionInput) int {
return (complexityLimit / 4) + childComplexity
}
complexityRoot.Mutation.CreateUser = func(childComplexity int, input models.UserInput) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.SignIn = func(
childComplexity int,
email string,
password string,
staySignedIn *bool,
) int {
return (complexityLimit / 2) + childComplexity
}
complexityRoot.Mutation.UpdateManyUsers = func(
childComplexity int,
ids []int,
input models.UserInput,
) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.UpdateProfession = func(
childComplexity int,
id int,
input models.ProfessionInput,
) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.UpdateQualification = func(
childComplexity int,
id int,
input models.QualificationInput,
) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.UpdateQuestion = func(
childComplexity int,
id int,
input models.QuestionInput,
) int {
return (complexityLimit / 4) + childComplexity
}
complexityRoot.Mutation.UpdateUser = func(childComplexity int, id int, input models.UserInput) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.DeleteProfessions = func(childComplexity int, ids []int) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.DeleteQualifications = func(childComplexity int, ids []int) int {
return (complexityLimit / 5) + childComplexity
}
complexityRoot.Mutation.DeleteQuestions = func(childComplexity int, ids []int) int {
return (complexityLimit / 4) + childComplexity
}
complexityRoot.Mutation.DeleteUsers = func(childComplexity int, ids []int) int {
return (complexityLimit / 5) + childComplexity
}
return complexityRoot
}
func computeComplexity(childComplexity, limit, totalFieldComplexity, multiplyBy int) int {
complexity := 0
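// if the selection includes the flat-priced count/total field, strip its
// placeholder cost from the per-item child complexity and charge the much
// cheaper per-query totalFieldComplexity once instead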
if childComplexity >= countComplexity {
childComplexity -= countComplexity
complexity += totalFieldComplexity
}
return limit*childComplexity*multiplyBy + complexity
}
func getCountComplexity(childComplexity int) int {
return countComplexity + childComplexity
}
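
A worked example of the formula (numbers are illustrative, not taken from the code): a professions query with an effective limit of 100 whose selection set has a child complexity of 1003, of which 1000 was contributed by the total field via getCountComplexity.

// illustrative numbers only: childComplexity = 1003, limit = 100
// 1003 >= countComplexity, so childComplexity drops to 3 and complexity becomes
// professionsTotalFieldComplexity (100); the result is 100*3*1 + 100 = 400,
// well under complexityLimit = 10000.
var _ = computeComplexity(1003, 100, professionsTotalFieldComplexity, 1)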

View File

@ -41,7 +41,7 @@ func (r *queryResolver) Professions(
&profession.FetchConfig{
Count: shouldCount(ctx),
Filter: filter,
Limit: utils.SafeIntPointer(limit, profession.DefaultLimit),
Limit: utils.SafeIntPointer(limit, profession.FetchDefaultLimit),
Offset: utils.SafeIntPointer(offset, 0),
Sort: sort,
},
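
utils.SafeIntPointer itself is not part of this diff; judging from the call sites here and in querycomplexity, it presumably dereferences a possibly-nil *int and falls back to a default. A sketch of that assumed behaviour (the body below is a guess, not the repository's implementation):

// assumed behaviour of utils.SafeIntPointer, inferred from its call sites:
// return the pointed-to value when the pointer is non-nil, otherwise the default
func SafeIntPointer(v *int, def int) int {
	if v == nil {
		return def
	}
	return *v
}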

View File

@ -40,7 +40,7 @@ func (r *queryResolver) Qualifications(
&qualification.FetchConfig{
Count: shouldCount(ctx),
Filter: filter,
Limit: utils.SafeIntPointer(limit, qualification.DefaultLimit),
Limit: utils.SafeIntPointer(limit, qualification.FetchDefaultLimit),
Offset: utils.SafeIntPointer(offset, 0),
Sort: sort,
},

View File

@ -1,6 +1,6 @@
package profession
const (
DefaultLimit = 100
MaxNameLength = 100
FetchDefaultLimit = 100
MaxNameLength = 100
)

View File

@ -61,7 +61,7 @@ func (ucase *usecase) Delete(ctx context.Context, f *models.ProfessionFilter) ([
func (ucase *usecase) Fetch(ctx context.Context, cfg *profession.FetchConfig) ([]*models.Profession, int, error) {
if cfg == nil {
cfg = &profession.FetchConfig{
Limit: profession.DefaultLimit,
Limit: profession.FetchDefaultLimit,
Count: true,
}
}

View File

@ -1,6 +1,6 @@
package qualification
const (
DefaultLimit = 100
MaxNameLength = 100
FetchDefaultLimit = 100
MaxNameLength = 100
)

View File

@ -61,7 +61,7 @@ func (ucase *usecase) Delete(ctx context.Context, f *models.QualificationFilter)
func (ucase *usecase) Fetch(ctx context.Context, cfg *qualification.FetchConfig) ([]*models.Qualification, int, error) {
if cfg == nil {
cfg = &qualification.FetchConfig{
Limit: qualification.DefaultLimit,
Limit: qualification.FetchDefaultLimit,
Count: true,
}
}

View File

@ -15,5 +15,5 @@ func Wrapf(details error, message string, args ...interface{}) error {
if mode.Get() != mode.ProductionMode {
return errors.Wrapf(details, message, args...)
}
return fmt.Errorf(message)
return fmt.Errorf(message, args...)
}
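
For context, this one-line fix matters in production mode, where Wrapf deliberately hides the wrapped error and returns only the formatted message. Previously the format arguments were dropped as well, so verbs in the message were never filled in. An illustration with hypothetical values:

// production mode, before: Wrapf(dbErr, "could not fetch profession %d", 7)
//   returned "could not fetch profession %!d(MISSING)"
// production mode, after the change:
//   returns "could not fetch profession 7"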