Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(core): change delete-on-updates to remove-on-update and set default false #9319

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 12 additions & 9 deletions dgraph/cmd/alpha/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,12 +134,15 @@ they form a Raft group and provide synchronous replication.
Flag("percentage",
"Cache percentages summing up to 100 for various caches (FORMAT: PostingListCache,"+
"PstoreBlockCache,PstoreIndexCache)").
Flag("delete-on-updates",
"When set as true, we would delete the key from the cache once it's updated. If it's not "+
"we would update the value inside the cache. If the cache gets too full, it starts"+
" to get slow. So if your usecase has a lot of heavy mutations, this should be set"+
" as true. If you are modifying same data again and again, this should be set as"+
" false").
Flag("remove-on-update",
"Dgraph implements a read-through cache (using Ristretto) to optimize query performance. On "+
"mutation, the new value is first written to disk and then updated in the cache "+
"before the transaction completes to ensure there are no stale reads during parallel "+
"transactions. In mutation-heavy use cases, it may be more advantageous to remove "+
"the cache entry on mutation instead of updating the value, as this executes ~14% "+
"faster on write. The new value will be added to the cache the first time it is "+
"queried, slightly delaying that read. To use this approach, set the --cache "+
"remove-on-update flag.").
String())

flag.String("raft", worker.RaftDefaults, z.NewSuperFlagHelp(worker.RaftDefaults).
Expand Down Expand Up @@ -628,7 +631,7 @@ func run() {
x.AssertTruef(totalCache >= 0, "ERROR: Cache size must be non-negative")

cachePercentage := cache.GetString("percentage")
deleteOnUpdates := cache.GetBool("delete-on-updates")
removeOnUpdate := cache.GetBool("remove-on-update")
cachePercent, err := x.GetCachePercentages(cachePercentage, 3)
x.Check(err)
postingListCacheSize := (cachePercent[0] * (totalCache << 20)) / 100
Expand All @@ -651,7 +654,7 @@ func run() {
WALDir: Alpha.Conf.GetString("wal"),
CacheMb: totalCache,
CachePercentage: cachePercentage,
DeleteOnUpdates: deleteOnUpdates,
RemoveOnUpdate: removeOnUpdate,

MutationsMode: worker.AllowMutations,
AuthToken: security.GetString("token"),
Expand Down Expand Up @@ -779,7 +782,7 @@ func run() {
// Posting will initialize index which requires schema. Hence, initialize
// schema before calling posting.Init().
schema.Init(worker.State.Pstore)
posting.Init(worker.State.Pstore, postingListCacheSize, deleteOnUpdates)
posting.Init(worker.State.Pstore, postingListCacheSize, removeOnUpdate)
defer posting.Cleanup()
worker.Init(worker.State.Pstore)

Expand Down
4 changes: 2 additions & 2 deletions posting/lists.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,12 @@ var (
)

// Init initializes the posting lists package, the in memory and dirty list hash.
func Init(ps *badger.DB, cacheSize int64, deleteOnUpdates bool) {
func Init(ps *badger.DB, cacheSize int64, removeOnUpdate bool) {
pstore = ps
closer = z.NewCloser(1)
go x.MonitorMemoryMetrics(closer)

memoryLayer = initMemoryLayer(cacheSize, deleteOnUpdates)
memoryLayer = initMemoryLayer(cacheSize, removeOnUpdate)
}

func UpdateMaxCost(maxCost int64) {
Expand Down
8 changes: 4 additions & 4 deletions posting/mvcc.go
Original file line number Diff line number Diff line change
Expand Up @@ -390,7 +390,7 @@ func (c *Cache) clear() {

type MemoryLayer struct {
// config
deleteOnUpdates bool
removeOnUpdate bool

// data
cache *Cache
Expand All @@ -410,9 +410,9 @@ func GetStatsHolder() *StatsHolder {
return memoryLayer.statsHolder
}

func initMemoryLayer(cacheSize int64, deleteOnUpdates bool) *MemoryLayer {
func initMemoryLayer(cacheSize int64, removeOnUpdate bool) *MemoryLayer {
ml := &MemoryLayer{}
ml.deleteOnUpdates = deleteOnUpdates
ml.removeOnUpdate = removeOnUpdate
ml.statsHolder = NewStatsHolder()
if cacheSize > 0 {
cache, err := ristretto.NewCache[[]byte, *CachePL](&ristretto.Config[[]byte, *CachePL]{
Expand Down Expand Up @@ -476,7 +476,7 @@ func (ml *MemoryLayer) updateItemInCache(key string, delta []byte, startTs, comm
return
}

if ml.deleteOnUpdates {
if ml.removeOnUpdate {
// TODO We should mark the key as deleted instead of directly deleting from the cache.
ml.del([]byte(key))
return
Expand Down
4 changes: 2 additions & 2 deletions worker/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,11 @@ type Options struct {
CachePercentage string
// CacheMb is the total memory allocated between all the caches.
CacheMb int64
// DeleteOnUpdates is the parameter that allows the user to set if the cache should keep the items that were
// RemoveOnUpdate is the parameter that allows the user to set if the cache should keep the items that were
// just mutated. Keeping these items is good when there is a mixed workload where you are updating the
// same element multiple times. However, for a heavy mutation workload, not keeping these items would be better
// , as keeping these elements bloats the cache making it slow.
DeleteOnUpdates bool
RemoveOnUpdate bool

Audit *x.LoggerConf

Expand Down
2 changes: 1 addition & 1 deletion worker/server_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ const (
ZeroLimitsDefaults = `uid-lease=0; refill-interval=30s; disable-admin-http=false;`
GraphQLDefaults = `introspection=true; debug=false; extensions=true; poll-interval=1s; ` +
`lambda-url=;`
CacheDefaults = `size-mb=1024; percentage=40,40,20; delete-on-updates=true`
CacheDefaults = `size-mb=1024; percentage=40,40,20; remove-on-update=false`
FeatureFlagsDefaults = `normalize-compatibility-mode=`
)

Expand Down
Loading