diff --git a/.golangci.bck.yml b/.golangci.bck.yml new file mode 100644 index 0000000000..5ed4d715cf --- /dev/null +++ b/.golangci.bck.yml @@ -0,0 +1,87 @@ +# options for analysis running +run: + timeout: 5m + +linters: + disable-all: true + enable: + # Default set of linters from golangci-lint + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused + # Linters added by the stash project. + # - contextcheck + - copyloopvar + - dogsled + - errchkjson + - errorlint + # - exhaustive + - gocritic + # - goerr113 + - gofmt + # - gomnd + # - ifshort + - misspell + # - nakedret + - noctx + - revive + - rowserrcheck + - sqlclosecheck + +# Project-specific linter overrides +linters-settings: + gofmt: + simplify: false + + errorlint: + # Disable errorf because there are false positives, where you don't want to wrap + # an error. + errorf: false + asserts: true + comparison: true + + revive: + ignore-generated-header: true + severity: error + confidence: 0.8 + rules: + - name: blank-imports + disabled: true + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + disabled: true + - name: if-return + disabled: true + - name: increment-decrement + - name: var-naming + disabled: true + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + disabled: true + - name: indent-error-flow + disabled: true + - name: errorf + - name: empty-block + disabled: true + - name: superfluous-else + - name: unused-parameter + disabled: true + - name: unreachable-code + - name: redefines-builtin-id + + rowserrcheck: + packages: + - github.com/jmoiron/sqlx diff --git a/.golangci.yml b/.golangci.yml index 5ed4d715cf..dc1e4536af 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,87 +1,86 @@ -# options for analysis running -run: - timeout: 5m - +version: "2" 
linters: - disable-all: true + default: none enable: - # Default set of linters from golangci-lint - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - typecheck - - unused - # Linters added by the stash project. - # - contextcheck - copyloopvar - dogsled + - errcheck - errchkjson - errorlint - # - exhaustive - gocritic - # - goerr113 - - gofmt - # - gomnd - # - ifshort + - govet + - ineffassign - misspell - # - nakedret - noctx - revive - rowserrcheck - sqlclosecheck - -# Project-specific linter overrides -linters-settings: - gofmt: - simplify: false - - errorlint: - # Disable errorf because there are false positives, where you don't want to wrap - # an error. - errorf: false - asserts: true - comparison: true - - revive: - ignore-generated-header: true - severity: error - confidence: 0.8 - rules: - - name: blank-imports - disabled: true - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - disabled: true - - name: if-return - disabled: true - - name: increment-decrement - - name: var-naming - disabled: true - - name: var-declaration - - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - disabled: true - - name: indent-error-flow - disabled: true - - name: errorf - - name: empty-block - disabled: true - - name: superfluous-else - - name: unused-parameter - disabled: true - - name: unreachable-code - - name: redefines-builtin-id - - rowserrcheck: - packages: - - github.com/jmoiron/sqlx + - staticcheck + - unused + settings: + errorlint: + errorf: false + asserts: true + comparison: true + revive: + confidence: 0.8 + severity: error + rules: + - name: blank-imports + disabled: true + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + disabled: true + - 
name: if-return + disabled: true + - name: increment-decrement + - name: var-naming + disabled: true + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + disabled: true + - name: indent-error-flow + disabled: true + - name: errorf + - name: empty-block + disabled: true + - name: superfluous-else + - name: unused-parameter + disabled: true + - name: unreachable-code + - name: redefines-builtin-id + rowserrcheck: + packages: + - github.com/jmoiron/sqlx + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + settings: + gofmt: + simplify: false + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/docs/dev/AUDIO.md b/docs/dev/AUDIO.md new file mode 100644 index 0000000000..91bd76a372 --- /dev/null +++ b/docs/dev/AUDIO.md @@ -0,0 +1,43 @@ +# Audio Datatype + +The `Audio` datatype is similar to `Scene` but stores audio-only media (i.e. Audiobooks, music, ASMR, etc). + +## Scope + +- This ticket adds backend support for Audio Only, future tickets can add the UI elements +- Audio metadata: + - Title + - Artists (string? 
like director) + - Date + - Studio + - Performers + - Tags + - Details + - Urls + - Rating + - Organized + - O History + - Play History + - Studio Code + - NICE TO HAVES + - Groups +- Audio File metadata: + - duration + - audio codec + - FUTURE (to be considered at a later date) + - channels (mono, stereo, 5.1, 7.1) + - bitrate + - sample rate + + +## TODO List + +- [ ] `pkg/sqlite/migrations/86_audio.up.sql` + - Create a migration for the Audio type, very similar to Scene +- [ ] Duplicate much of `pkg/scene/*` into `pkg/audio/*` + - Exclude: markers, screenshot, preview, transcode, sprite +- [ ] Graphql + - [ ] Copy/modify `graphql/schema/types/scene.graphql` to `graphql/schema/types/audio.graphql` + +### Last Steps +- [ ] Delete this file upon completion of the feature \ No newline at end of file diff --git a/gqlgen.yml b/gqlgen.yml index 4a3d73d519..7daa421635 100644 --- a/gqlgen.yml +++ b/gqlgen.yml @@ -44,6 +44,13 @@ models: fieldName: DurationFinite frame_rate: fieldName: FrameRateFinite + AudioFile: + fields: + # override float fields - #1572 + duration: + fieldName: DurationFinite + sample_rate: + fieldName: SampleRateFinite # movie is group under the hood Movie: model: github.com/stashapp/stash/pkg/models.Group @@ -96,6 +103,8 @@ models: model: github.com/stashapp/stash/internal/manager.StashBoxBatchTagInput SceneStreamEndpoint: model: github.com/stashapp/stash/internal/manager.SceneStreamEndpoint + AudioStreamEndpoint: + model: github.com/stashapp/stash/internal/manager.AudioStreamEndpoint ExportObjectTypeInput: model: github.com/stashapp/stash/internal/manager.ExportObjectTypeInput ExportObjectsInput: diff --git a/graphql/schema/schema.graphql b/graphql/schema/schema.graphql index 7f07e45792..ca894397e6 100644 --- a/graphql/schema/schema.graphql +++ b/graphql/schema/schema.graphql @@ -1,3 +1,5 @@ +# TODO(audio): add findAudio, findAudios, audioCreate, audioUpdate, audioDestroy, audiosDestroy + "The query root for this schema" type Query { # Filters diff 
--git a/graphql/schema/types/audio.graphql b/graphql/schema/types/audio.graphql new file mode 100644 index 0000000000..7433ec22ca --- /dev/null +++ b/graphql/schema/types/audio.graphql @@ -0,0 +1,247 @@ +# TODO(audio): update this file + +type AudioFileType { + size: String + duration: Float + audio_codec: String + samplerate: Float + bitrate: Int +} + +type AudioPathsType { + screenshot: String # Resolver + preview: String # Resolver + stream: String # Resolver + webp: String # Resolver + vtt: String # Resolver + sprite: String # Resolver + funscript: String # Resolver + caption: String # Resolver +} + +type AudioGroup { + group: Group! + audio_index: Int +} + +# TODO(audio|AudioCaption): need to update IF AudioCaption required +# type AudioCaption { +# language_code: String! +# caption_type: String! +# } + +type Audio { + id: ID! + title: String + code: String + details: String + url: String @deprecated(reason: "Use urls") + urls: [String!]! + date: String + # rating expressed as 1-100 + rating100: Int + organized: Boolean! + o_counter: Int + # TODO(audio|AudioCaption): need to update IF AudioCaption required + # captions: [AudioCaption!] + captions: [VideoCaption!] + created_at: Time! + updated_at: Time! + "The last time play count was updated" + last_played_at: Time + "The time index a audio was left at" + resume_time: Float + "The total time a audio has spent playing" + play_duration: Float + "The number ot times a audio has been played" + play_count: Int + + "Times a audio was played" + play_history: [Time!]! + "Times the o counter was incremented" + o_history: [Time!]! + + files: [AudioFile!]! + paths: AudioPathsType! # Resolver + # TODO(future|audio_markers): add in audio markers + # audio_markers: [AudioMarker!]! + studio: Studio + groups: [AudioGroup!]! + tags: [Tag!]! + performers: [Performer!]! + + custom_fields: Map! + + "Return valid stream paths" + audioStreams: [AudioStreamEndpoint!]! +} + +input AudioGroupInput { + group_id: ID! 
+ audio_index: Int +} + +input AudioCreateInput { + title: String + code: String + details: String + url: String @deprecated(reason: "Use urls") + urls: [String!] + date: String + # rating expressed as 1-100 + rating100: Int + organized: Boolean + studio_id: ID + performer_ids: [ID!] + groups: [AudioGroupInput!] + tag_ids: [ID!] + "This should be a URL or a base64 encoded data URL" + cover_image: String + + """ + The first id will be assigned as primary. + Files will be reassigned from existing audios if applicable. + Files must not already be primary for another audio. + """ + file_ids: [ID!] + + custom_fields: Map +} + +input AudioUpdateInput { + clientMutationId: String + id: ID! + title: String + code: String + details: String + url: String @deprecated(reason: "Use urls") + urls: [String!] + date: String + # rating expressed as 1-100 + rating100: Int + o_counter: Int + @deprecated(reason: "Unsupported - Use audioIncrementO/audioDecrementO") + organized: Boolean + studio_id: ID + performer_ids: [ID!] + groups: [AudioGroupInput!] + tag_ids: [ID!] + "This should be a URL or a base64 encoded data URL" + cover_image: String + + "The time index a audio was left at" + resume_time: Float + "The total time a audio has spent playing" + play_duration: Float + "The number ot times a audio has been played" + play_count: Int + @deprecated( + reason: "Unsupported - Use audioIncrementPlayCount/audioDecrementPlayCount" + ) + + primary_file_id: ID + + custom_fields: CustomFieldsInput +} + +input BulkAudioUpdateInput { + clientMutationId: String + ids: [ID!] + title: String + code: String + details: String + url: String @deprecated(reason: "Use urls") + urls: BulkUpdateStrings + date: String + # rating expressed as 1-100 + rating100: Int + organized: Boolean + studio_id: ID + performer_ids: BulkUpdateIds + tag_ids: BulkUpdateIds + group_ids: BulkUpdateIds + + custom_fields: CustomFieldsInput +} + +input AudioDestroyInput { + id: ID! 
+ delete_file: Boolean + delete_generated: Boolean + "If true, delete the file entry from the database if the file is not assigned to any other objects" + destroy_file_entry: Boolean +} + +input AudiosDestroyInput { + ids: [ID!]! + delete_file: Boolean + delete_generated: Boolean + "If true, delete the file entry from the database if the file is not assigned to any other objects" + destroy_file_entry: Boolean +} + +type FindAudiosResultType { + count: Int! + "Total duration in seconds" + duration: Float! + "Total file size in bytes" + filesize: Float! + audios: [Audio!]! +} + +input AudioParserInput { + ignoreWords: [String!] + whitespaceCharacters: String + capitalizeTitle: Boolean + ignoreOrganized: Boolean +} + +type AudioParserResult { + audio: Audio! + title: String + code: String + details: String + url: String + date: String + # rating expressed as 1-100 + rating100: Int + studio_id: ID + performer_ids: [ID!] + tag_ids: [ID!] +} + +type AudioParserResultType { + count: Int! + results: [AudioParserResult!]! +} + +input AudioHashInput { + checksum: String + oshash: String +} + +type AudioStreamEndpoint { + url: String! + mime_type: String + label: String +} + +input AssignAudioFileInput { + audio_id: ID! + file_id: ID! +} + +input AudioMergeInput { + """ + If destination audio has no files, then the primary file of the + first source audio will be assigned as primary + """ + source: [ID!]! + destination: ID! 
+ # values defined here will override values in the destination + values: AudioUpdateInput + + # if true, the source history will be combined with the destination + play_history: Boolean + o_history: Boolean +} diff --git a/graphql/schema/types/file.graphql b/graphql/schema/types/file.graphql index fcc2a58c82..a06b6fd5b2 100644 --- a/graphql/schema/types/file.graphql +++ b/graphql/schema/types/file.graphql @@ -124,6 +124,34 @@ type ImageFile implements BaseFile { union VisualFile = VideoFile | ImageFile +type AudioFile implements BaseFile { + # TODO: edit this + id: ID! + path: String! + basename: String! + + parent_folder_id: ID! @deprecated(reason: "Use parent_folder instead") + zip_file_id: ID @deprecated(reason: "Use zip_file instead") + + parent_folder: Folder! + zip_file: BasicFile + + mod_time: Time! + size: Int64! + + fingerprint(type: String!): String + fingerprints: [Fingerprint!]! + + format: String! + duration: Float! + audio_codec: String! + sample_rate: Float! + bit_rate: Int! + + created_at: Time! + updated_at: Time! +} + type GalleryFile implements BaseFile { id: ID! path: String! diff --git a/graphql/schema/types/filters.graphql b/graphql/schema/types/filters.graphql index c7d8802662..3f56521d58 100644 --- a/graphql/schema/types/filters.graphql +++ b/graphql/schema/types/filters.graphql @@ -1,3 +1,5 @@ +# TODO(audio): add AudioFilterType + enum SortDirectionEnum { ASC DESC @@ -777,6 +779,97 @@ input ImageFilterType { custom_fields: [CustomFieldCriterionInput!] 
} +input AudioFilterType { + AND: AudioFilterType + OR: AudioFilterType + NOT: AudioFilterType + + id: IntCriterionInput + title: StringCriterionInput + code: StringCriterionInput + details: StringCriterionInput + + "Filter by file oshash" + oshash: StringCriterionInput + "Filter by file checksum" + checksum: StringCriterionInput + "Filter by file phash" + phash: StringCriterionInput @deprecated(reason: "Use phash_distance instead") + "Filter by file phash distance" + phash_distance: PhashDistanceCriterionInput + "Filter by path" + path: StringCriterionInput + "Filter by file count" + file_count: IntCriterionInput + # rating expressed as 1-100 + rating100: IntCriterionInput + "Filter by organized" + organized: Boolean + "Filter by o-counter" + o_counter: IntCriterionInput + "Filter Scenes by duplication criteria" + duplicated: DuplicationCriterionInput + "Filter by sample rate" + samplerate: IntCriterionInput + "Filter by bit rate" + bitrate: IntCriterionInput + "Filter by audio codec" + audio_codec: StringCriterionInput + "Filter by duration (in seconds)" + duration: IntCriterionInput + "Filter to only include scenes missing this property" + is_missing: String + "Filter to only include scenes with this studio" + studios: HierarchicalMultiCriterionInput + "Filter to only include scenes with this group" + groups: HierarchicalMultiCriterionInput + "Filter to only include scenes with these tags" + tags: HierarchicalMultiCriterionInput + "Filter by tag count" + tag_count: IntCriterionInput + "Filter to only include scenes with performers with these tags" + performer_tags: HierarchicalMultiCriterionInput + "Filter scenes that have performers that have been favorited" + performer_favorite: Boolean + "Filter scenes by performer age at time of scene" + performer_age: IntCriterionInput + "Filter to only include scenes with these performers" + performers: MultiCriterionInput + "Filter by performer count" + performer_count: IntCriterionInput + "Filter by url" + url: 
StringCriterionInput + "Filter by captions" + captions: StringCriterionInput + "Filter by resume time" + resume_time: IntCriterionInput + "Filter by play count" + play_count: IntCriterionInput + "Filter by play duration (in seconds)" + play_duration: IntCriterionInput + "Filter by scene last played time" + last_played_at: TimestampCriterionInput + "Filter by date" + date: DateCriterionInput + "Filter by creation time" + created_at: TimestampCriterionInput + "Filter by last update time" + updated_at: TimestampCriterionInput + + "Filter by related performers that meet this criteria" + performers_filter: PerformerFilterType + "Filter by related studios that meet this criteria" + studios_filter: StudioFilterType + "Filter by related tags that meet this criteria" + tags_filter: TagFilterType + "Filter by related groups that meet this criteria" + groups_filter: GroupFilterType + "Filter by related files that meet this criteria" + files_filter: FileFilterType + + custom_fields: [CustomFieldCriterionInput!] 
+} + input FileFilterType { AND: FileFilterType OR: FileFilterType @@ -862,6 +955,17 @@ input VideoFileFilterInput { interactive: Boolean interactive_speed: IntCriterionInput } +input AudioFileFilterInput { + samplerate: IntCriterionInput + bitrate: IntCriterionInput + format: StringCriterionInput + audio_codec: StringCriterionInput + + "in seconds" + duration: IntCriterionInput + + captions: StringCriterionInput +} input ImageFileFilterInput { format: StringCriterionInput diff --git a/internal/api/changeset_translator.go b/internal/api/changeset_translator.go index 45285bddec..c8fc6047e0 100644 --- a/internal/api/changeset_translator.go +++ b/internal/api/changeset_translator.go @@ -395,10 +395,37 @@ func (t changesetTranslator) relatedGroups(value []models.SceneGroupInput) (mode if err != nil { return models.RelatedGroups{}, err } - + return models.NewRelatedGroups(groupsScenes), nil } +func groupsAudioFromGroupInput(input []models.AudioGroupInput) ([]models.GroupsAudios, error) { + ret := make([]models.GroupsAudios, len(input)) + + for i, v := range input { + mID, err := strconv.Atoi(v.GroupID) + if err != nil { + return nil, fmt.Errorf("invalid group ID: %s", v.GroupID) + } + + ret[i] = models.GroupsAudios{ + GroupID: mID, + AudioIndex: v.AudioIndex, + } + } + + return ret, nil +} + +func (t changesetTranslator) relatedGroupsAudio(value []models.AudioGroupInput) (models.RelatedGroupsAudio, error) { + groupsAudios, err := groupsAudioFromGroupInput(value) + if err != nil { + return models.RelatedGroupsAudio{}, err + } + + return models.NewRelatedGroupsAudio(groupsAudios), nil +} + func (t changesetTranslator) updateGroupIDsFromMovies(value []models.SceneMovieInput, field string) (*models.UpdateGroupIDs, error) { if !t.hasField(field) { return nil, nil @@ -452,6 +479,44 @@ func (t changesetTranslator) updateGroupIDsBulk(value *BulkUpdateIds, field stri }, nil } +func (t changesetTranslator) updateGroupIDsAudio(value []models.AudioGroupInput, field string) 
(*models.UpdateGroupIDsAudio, error) { + if !t.hasField(field) { + return nil, nil + } + + groupsAudios, err := groupsAudioFromGroupInput(value) + if err != nil { + return nil, err + } + + return &models.UpdateGroupIDsAudio{ + Groups: groupsAudios, + Mode: models.RelationshipUpdateModeSet, + }, nil +} + +func (t changesetTranslator) updateGroupIDsBulkAudio(value *BulkUpdateIds, field string) (*models.UpdateGroupIDsAudio, error) { + if !t.hasField(field) || value == nil { + return nil, nil + } + + ids, err := stringslice.StringSliceToIntSlice(value.Ids) + if err != nil { + return nil, fmt.Errorf("converting ids [%v]: %w", value.Ids, err) + } + + groups := make([]models.GroupsAudios, len(ids)) + for i, id := range ids { + groups[i] = models.GroupsAudios{GroupID: id} + } + + return &models.UpdateGroupIDsAudio{ + Groups: groups, + Mode: value.Mode, + }, nil +} + + func groupsDescriptionsFromGroupInput(input []*GroupDescriptionInput) ([]models.GroupIDDescription, error) { ret := make([]models.GroupIDDescription, len(input)) diff --git a/internal/api/loaders/audiofileidsloader_gen.go b/internal/api/loaders/audiofileidsloader_gen.go new file mode 100644 index 0000000000..6fe06ac391 --- /dev/null +++ b/internal/api/loaders/audiofileidsloader_gen.go @@ -0,0 +1,225 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. 
+ +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// AudioFileIDsLoaderConfig captures the config to create a new AudioFileIDsLoader +type AudioFileIDsLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]models.FileID, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioFileIDsLoader creates a new AudioFileIDsLoader given a fetch, wait, and maxBatch +func NewAudioFileIDsLoader(config AudioFileIDsLoaderConfig) *AudioFileIDsLoader { + return &AudioFileIDsLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioFileIDsLoader batches and caches requests +type AudioFileIDsLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]models.FileID, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]models.FileID + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioFileIDsLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioFileIDsLoaderBatch struct { + keys []int + data [][]models.FileID + error []error + closing bool + done chan struct{} +} + +// Load a FileID by key, batching and caching will be applied automatically +func (l *AudioFileIDsLoader) Load(key int) ([]models.FileID, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a FileID. 
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioFileIDsLoader) LoadThunk(key int) func() ([]models.FileID, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]models.FileID, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioFileIDsLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]models.FileID, error) { + <-batch.done + + var data []models.FileID + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioFileIDsLoader) LoadAll(keys []int) ([][]models.FileID, []error) { + results := make([]func() ([]models.FileID, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + fileIDs := make([][]models.FileID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + fileIDs[i], errors[i] = thunk() + } + return fileIDs, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a FileIDs. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioFileIDsLoader) LoadAllThunk(keys []int) func() ([][]models.FileID, []error) { + results := make([]func() ([]models.FileID, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]models.FileID, []error) { + fileIDs := make([][]models.FileID, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + fileIDs[i], errors[i] = thunk() + } + return fileIDs, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *AudioFileIDsLoader) Prime(key int, value []models.FileID) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. + cpy := make([]models.FileID, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioFileIDsLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioFileIDsLoader) unsafeSet(key int, value []models.FileID) { + if l.cache == nil { + l.cache = map[int][]models.FileID{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioFileIDsLoaderBatch) keyIndex(l *AudioFileIDsLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioFileIDsLoaderBatch) 
startTimer(l *AudioFileIDsLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioFileIDsLoaderBatch) end(l *AudioFileIDsLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audiolastplayedloader_gen.go b/internal/api/loaders/audiolastplayedloader_gen.go new file mode 100644 index 0000000000..7e04418217 --- /dev/null +++ b/internal/api/loaders/audiolastplayedloader_gen.go @@ -0,0 +1,222 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" +) + +// AudioLastPlayedLoaderConfig captures the config to create a new AudioLastPlayedLoader +type AudioLastPlayedLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*time.Time, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioLastPlayedLoader creates a new AudioLastPlayedLoader given a fetch, wait, and maxBatch +func NewAudioLastPlayedLoader(config AudioLastPlayedLoaderConfig) *AudioLastPlayedLoader { + return &AudioLastPlayedLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioLastPlayedLoader batches and caches requests +type AudioLastPlayedLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*time.Time, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*time.Time + + // the current batch. 
keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioLastPlayedLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioLastPlayedLoaderBatch struct { + keys []int + data []*time.Time + error []error + closing bool + done chan struct{} +} + +// Load a Time by key, batching and caching will be applied automatically +func (l *AudioLastPlayedLoader) Load(key int) (*time.Time, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Time. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioLastPlayedLoader) LoadThunk(key int) func() (*time.Time, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*time.Time, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioLastPlayedLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*time.Time, error) { + <-batch.done + + var data *time.Time + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. 
It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioLastPlayedLoader) LoadAll(keys []int) ([]*time.Time, []error) { + results := make([]func() (*time.Time, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + times := make([]*time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Times. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioLastPlayedLoader) LoadAllThunk(keys []int) func() ([]*time.Time, []error) { + results := make([]func() (*time.Time, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*time.Time, []error) { + times := make([]*time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *AudioLastPlayedLoader) Prime(key int, value *time.Time) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. 
+ cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioLastPlayedLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioLastPlayedLoader) unsafeSet(key int, value *time.Time) { + if l.cache == nil { + l.cache = map[int]*time.Time{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioLastPlayedLoaderBatch) keyIndex(l *AudioLastPlayedLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioLastPlayedLoaderBatch) startTimer(l *AudioLastPlayedLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioLastPlayedLoaderBatch) end(l *AudioLastPlayedLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audioloader_gen.go b/internal/api/loaders/audioloader_gen.go new file mode 100644 index 0000000000..43efc52204 --- /dev/null +++ b/internal/api/loaders/audioloader_gen.go @@ -0,0 +1,224 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. 
+ +package loaders + +import ( + "sync" + "time" + + "github.com/stashapp/stash/pkg/models" +) + +// AudioLoaderConfig captures the config to create a new AudioLoader +type AudioLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]*models.Audio, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioLoader creates a new AudioLoader given a fetch, wait, and maxBatch +func NewAudioLoader(config AudioLoaderConfig) *AudioLoader { + return &AudioLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioLoader batches and caches requests +type AudioLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]*models.Audio, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]*models.Audio + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioLoaderBatch struct { + keys []int + data []*models.Audio + error []error + closing bool + done chan struct{} +} + +// Load a Audio by key, batching and caching will be applied automatically +func (l *AudioLoader) Load(key int) (*models.Audio, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Audio. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioLoader) LoadThunk(key int) func() (*models.Audio, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (*models.Audio, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (*models.Audio, error) { + <-batch.done + + var data *models.Audio + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioLoader) LoadAll(keys []int) ([]*models.Audio, []error) { + results := make([]func() (*models.Audio, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + audios := make([]*models.Audio, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + audios[i], errors[i] = thunk() + } + return audios, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Audios. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioLoader) LoadAllThunk(keys []int) func() ([]*models.Audio, []error) { + results := make([]func() (*models.Audio, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]*models.Audio, []error) { + audios := make([]*models.Audio, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + audios[i], errors[i] = thunk() + } + return audios, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *AudioLoader) Prime(key int, value *models.Audio) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. + cpy := *value + l.unsafeSet(key, &cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioLoader) unsafeSet(key int, value *models.Audio) { + if l.cache == nil { + l.cache = map[int]*models.Audio{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioLoaderBatch) keyIndex(l *AudioLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioLoaderBatch) startTimer(l *AudioLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are 
already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioLoaderBatch) end(l *AudioLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audioocountloader_gen.go b/internal/api/loaders/audioocountloader_gen.go new file mode 100644 index 0000000000..871324ffdd --- /dev/null +++ b/internal/api/loaders/audioocountloader_gen.go @@ -0,0 +1,219 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" +) + +// AudioOCountLoaderConfig captures the config to create a new AudioOCountLoader +type AudioOCountLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]int, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioOCountLoader creates a new AudioOCountLoader given a fetch, wait, and maxBatch +func NewAudioOCountLoader(config AudioOCountLoaderConfig) *AudioOCountLoader { + return &AudioOCountLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioOCountLoader batches and caches requests +type AudioOCountLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]int, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]int + + // the current batch. 
keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioOCountLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioOCountLoaderBatch struct { + keys []int + data []int + error []error + closing bool + done chan struct{} +} + +// Load a int by key, batching and caching will be applied automatically +func (l *AudioOCountLoader) Load(key int) (int, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a int. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioOCountLoader) LoadThunk(key int) func() (int, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (int, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioOCountLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (int, error) { + <-batch.done + + var data int + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. 
 It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioOCountLoader) LoadAll(keys []int) ([]int, []error) { + results := make([]func() (int, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + ints := make([]int, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + ints[i], errors[i] = thunk() + } + return ints, errors +} + +// LoadAllThunk returns a function that when called will block waiting for ints. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioOCountLoader) LoadAllThunk(keys []int) func() ([]int, []error) { + results := make([]func() (int, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]int, []error) { + ints := make([]int, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + ints[i], errors[i] = thunk() + } + return ints, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) 
+func (l *AudioOCountLoader) Prime(key int, value int) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + l.unsafeSet(key, value) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioOCountLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioOCountLoader) unsafeSet(key int, value int) { + if l.cache == nil { + l.cache = map[int]int{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioOCountLoaderBatch) keyIndex(l *AudioOCountLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioOCountLoaderBatch) startTimer(l *AudioOCountLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioOCountLoaderBatch) end(l *AudioOCountLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audioohistoryloader_gen.go b/internal/api/loaders/audioohistoryloader_gen.go new file mode 100644 index 0000000000..875b47ca2f --- /dev/null +++ b/internal/api/loaders/audioohistoryloader_gen.go @@ -0,0 +1,223 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. 
+ +package loaders + +import ( + "sync" + "time" +) + +// AudioOHistoryLoaderConfig captures the config to create a new AudioOHistoryLoader +type AudioOHistoryLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]time.Time, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioOHistoryLoader creates a new AudioOHistoryLoader given a fetch, wait, and maxBatch +func NewAudioOHistoryLoader(config AudioOHistoryLoaderConfig) *AudioOHistoryLoader { + return &AudioOHistoryLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioOHistoryLoader batches and caches requests +type AudioOHistoryLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]time.Time, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]time.Time + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioOHistoryLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioOHistoryLoaderBatch struct { + keys []int + data [][]time.Time + error []error + closing bool + done chan struct{} +} + +// Load a Time by key, batching and caching will be applied automatically +func (l *AudioOHistoryLoader) Load(key int) ([]time.Time, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Time. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioOHistoryLoader) LoadThunk(key int) func() ([]time.Time, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]time.Time, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioOHistoryLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]time.Time, error) { + <-batch.done + + var data []time.Time + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioOHistoryLoader) LoadAll(keys []int) ([][]time.Time, []error) { + results := make([]func() ([]time.Time, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + times := make([][]time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Times. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioOHistoryLoader) LoadAllThunk(keys []int) func() ([][]time.Time, []error) { + results := make([]func() ([]time.Time, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]time.Time, []error) { + times := make([][]time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *AudioOHistoryLoader) Prime(key int, value []time.Time) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. + cpy := make([]time.Time, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioOHistoryLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioOHistoryLoader) unsafeSet(key int, value []time.Time) { + if l.cache == nil { + l.cache = map[int][]time.Time{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioOHistoryLoaderBatch) keyIndex(l *AudioOHistoryLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioOHistoryLoaderBatch) startTimer(l *AudioOHistoryLoader) { 
+ time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioOHistoryLoaderBatch) end(l *AudioOHistoryLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audioplaycountloader_gen.go b/internal/api/loaders/audioplaycountloader_gen.go new file mode 100644 index 0000000000..60fe9246e5 --- /dev/null +++ b/internal/api/loaders/audioplaycountloader_gen.go @@ -0,0 +1,219 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. + +package loaders + +import ( + "sync" + "time" +) + +// AudioPlayCountLoaderConfig captures the config to create a new AudioPlayCountLoader +type AudioPlayCountLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([]int, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioPlayCountLoader creates a new AudioPlayCountLoader given a fetch, wait, and maxBatch +func NewAudioPlayCountLoader(config AudioPlayCountLoaderConfig) *AudioPlayCountLoader { + return &AudioPlayCountLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioPlayCountLoader batches and caches requests +type AudioPlayCountLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([]int, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int]int + + // the current batch. 
keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioPlayCountLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioPlayCountLoaderBatch struct { + keys []int + data []int + error []error + closing bool + done chan struct{} +} + +// Load a int by key, batching and caching will be applied automatically +func (l *AudioPlayCountLoader) Load(key int) (int, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a int. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioPlayCountLoader) LoadThunk(key int) func() (int, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() (int, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioPlayCountLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() (int, error) { + <-batch.done + + var data int + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. 
 It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioPlayCountLoader) LoadAll(keys []int) ([]int, []error) { + results := make([]func() (int, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + ints := make([]int, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + ints[i], errors[i] = thunk() + } + return ints, errors +} + +// LoadAllThunk returns a function that when called will block waiting for ints. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioPlayCountLoader) LoadAllThunk(keys []int) func() ([]int, []error) { + results := make([]func() (int, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([]int, []error) { + ints := make([]int, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + ints[i], errors[i] = thunk() + } + return ints, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) 
+func (l *AudioPlayCountLoader) Prime(key int, value int) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + l.unsafeSet(key, value) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioPlayCountLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioPlayCountLoader) unsafeSet(key int, value int) { + if l.cache == nil { + l.cache = map[int]int{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioPlayCountLoaderBatch) keyIndex(l *AudioPlayCountLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioPlayCountLoaderBatch) startTimer(l *AudioPlayCountLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioPlayCountLoaderBatch) end(l *AudioPlayCountLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/audioplayhistoryloader_gen.go b/internal/api/loaders/audioplayhistoryloader_gen.go new file mode 100644 index 0000000000..932b129094 --- /dev/null +++ b/internal/api/loaders/audioplayhistoryloader_gen.go @@ -0,0 +1,223 @@ +// Code generated by github.com/vektah/dataloaden, DO NOT EDIT. 
+ +package loaders + +import ( + "sync" + "time" +) + +// AudioPlayHistoryLoaderConfig captures the config to create a new AudioPlayHistoryLoader +type AudioPlayHistoryLoaderConfig struct { + // Fetch is a method that provides the data for the loader + Fetch func(keys []int) ([][]time.Time, []error) + + // Wait is how long wait before sending a batch + Wait time.Duration + + // MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit + MaxBatch int +} + +// NewAudioPlayHistoryLoader creates a new AudioPlayHistoryLoader given a fetch, wait, and maxBatch +func NewAudioPlayHistoryLoader(config AudioPlayHistoryLoaderConfig) *AudioPlayHistoryLoader { + return &AudioPlayHistoryLoader{ + fetch: config.Fetch, + wait: config.Wait, + maxBatch: config.MaxBatch, + } +} + +// AudioPlayHistoryLoader batches and caches requests +type AudioPlayHistoryLoader struct { + // this method provides the data for the loader + fetch func(keys []int) ([][]time.Time, []error) + + // how long to done before sending a batch + wait time.Duration + + // this will limit the maximum number of keys to send in one batch, 0 = no limit + maxBatch int + + // INTERNAL + + // lazily created cache + cache map[int][]time.Time + + // the current batch. keys will continue to be collected until timeout is hit, + // then everything will be sent to the fetch method and out to the listeners + batch *audioPlayHistoryLoaderBatch + + // mutex to prevent races + mu sync.Mutex +} + +type audioPlayHistoryLoaderBatch struct { + keys []int + data [][]time.Time + error []error + closing bool + done chan struct{} +} + +// Load a Time by key, batching and caching will be applied automatically +func (l *AudioPlayHistoryLoader) Load(key int) ([]time.Time, error) { + return l.LoadThunk(key)() +} + +// LoadThunk returns a function that when called will block waiting for a Time. 
+// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. +func (l *AudioPlayHistoryLoader) LoadThunk(key int) func() ([]time.Time, error) { + l.mu.Lock() + if it, ok := l.cache[key]; ok { + l.mu.Unlock() + return func() ([]time.Time, error) { + return it, nil + } + } + if l.batch == nil { + l.batch = &audioPlayHistoryLoaderBatch{done: make(chan struct{})} + } + batch := l.batch + pos := batch.keyIndex(l, key) + l.mu.Unlock() + + return func() ([]time.Time, error) { + <-batch.done + + var data []time.Time + if pos < len(batch.data) { + data = batch.data[pos] + } + + var err error + // its convenient to be able to return a single error for everything + if len(batch.error) == 1 { + err = batch.error[0] + } else if batch.error != nil { + err = batch.error[pos] + } + + if err == nil { + l.mu.Lock() + l.unsafeSet(key, data) + l.mu.Unlock() + } + + return data, err + } +} + +// LoadAll fetches many keys at once. It will be broken into appropriate sized +// sub batches depending on how the loader is configured +func (l *AudioPlayHistoryLoader) LoadAll(keys []int) ([][]time.Time, []error) { + results := make([]func() ([]time.Time, error), len(keys)) + + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + + times := make([][]time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors +} + +// LoadAllThunk returns a function that when called will block waiting for a Times. +// This method should be used if you want one goroutine to make requests to many +// different data loaders without blocking until the thunk is called. 
+func (l *AudioPlayHistoryLoader) LoadAllThunk(keys []int) func() ([][]time.Time, []error) { + results := make([]func() ([]time.Time, error), len(keys)) + for i, key := range keys { + results[i] = l.LoadThunk(key) + } + return func() ([][]time.Time, []error) { + times := make([][]time.Time, len(keys)) + errors := make([]error, len(keys)) + for i, thunk := range results { + times[i], errors[i] = thunk() + } + return times, errors + } +} + +// Prime the cache with the provided key and value. If the key already exists, no change is made +// and false is returned. +// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).) +func (l *AudioPlayHistoryLoader) Prime(key int, value []time.Time) bool { + l.mu.Lock() + var found bool + if _, found = l.cache[key]; !found { + // make a copy when writing to the cache, its easy to pass a pointer in from a loop var + // and end up with the whole cache pointing to the same value. + cpy := make([]time.Time, len(value)) + copy(cpy, value) + l.unsafeSet(key, cpy) + } + l.mu.Unlock() + return !found +} + +// Clear the value at key from the cache, if it exists +func (l *AudioPlayHistoryLoader) Clear(key int) { + l.mu.Lock() + delete(l.cache, key) + l.mu.Unlock() +} + +func (l *AudioPlayHistoryLoader) unsafeSet(key int, value []time.Time) { + if l.cache == nil { + l.cache = map[int][]time.Time{} + } + l.cache[key] = value +} + +// keyIndex will return the location of the key in the batch, if its not found +// it will add the key to the batch +func (b *audioPlayHistoryLoaderBatch) keyIndex(l *AudioPlayHistoryLoader, key int) int { + for i, existingKey := range b.keys { + if key == existingKey { + return i + } + } + + pos := len(b.keys) + b.keys = append(b.keys, key) + if pos == 0 { + go b.startTimer(l) + } + + if l.maxBatch != 0 && pos >= l.maxBatch-1 { + if !b.closing { + b.closing = true + l.batch = nil + go b.end(l) + } + } + + return pos +} + +func (b *audioPlayHistoryLoaderBatch) startTimer(l 
*AudioPlayHistoryLoader) { + time.Sleep(l.wait) + l.mu.Lock() + + // we must have hit a batch limit and are already finalizing this batch + if b.closing { + l.mu.Unlock() + return + } + + l.batch = nil + l.mu.Unlock() + + b.end(l) +} + +func (b *audioPlayHistoryLoaderBatch) end(l *AudioPlayHistoryLoader) { + b.data, b.error = l.fetch(b.keys) + close(b.done) +} diff --git a/internal/api/loaders/dataloaders.go b/internal/api/loaders/dataloaders.go index c1faf61ed3..b539c8f16a 100644 --- a/internal/api/loaders/dataloaders.go +++ b/internal/api/loaders/dataloaders.go @@ -3,6 +3,7 @@ // The dataloaders are used to batch requests to the database. //go:generate go run github.com/vektah/dataloaden SceneLoader int *github.com/stashapp/stash/pkg/models.Scene +//go:generate go run github.com/vektah/dataloaden AudioLoader int *github.com/stashapp/stash/pkg/models.Audio //go:generate go run github.com/vektah/dataloaden GalleryLoader int *github.com/stashapp/stash/pkg/models.Gallery //go:generate go run github.com/vektah/dataloaden ImageLoader int *github.com/stashapp/stash/pkg/models.Image //go:generate go run github.com/vektah/dataloaden PerformerLoader int *github.com/stashapp/stash/pkg/models.Performer @@ -13,6 +14,7 @@ //go:generate go run github.com/vektah/dataloaden FolderLoader github.com/stashapp/stash/pkg/models.FolderID *github.com/stashapp/stash/pkg/models.Folder //go:generate go run github.com/vektah/dataloaden FolderRelatedFolderIDsLoader github.com/stashapp/stash/pkg/models.FolderID []github.com/stashapp/stash/pkg/models.FolderID //go:generate go run github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID +//go:generate go run github.com/vektah/dataloaden AudioFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID //go:generate go run github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID //go:generate go run github.com/vektah/dataloaden GalleryFileIDsLoader int 
[]github.com/stashapp/stash/pkg/models.FileID //go:generate go run github.com/vektah/dataloaden CustomFieldsLoader int github.com/stashapp/stash/pkg/models.CustomFieldMap @@ -21,6 +23,11 @@ //go:generate go run github.com/vektah/dataloaden SceneOHistoryLoader int []time.Time //go:generate go run github.com/vektah/dataloaden ScenePlayHistoryLoader int []time.Time //go:generate go run github.com/vektah/dataloaden SceneLastPlayedLoader int *time.Time +//go:generate go run github.com/vektah/dataloaden AudioOCountLoader int int +//go:generate go run github.com/vektah/dataloaden AudioPlayCountLoader int int +//go:generate go run github.com/vektah/dataloaden AudioOHistoryLoader int []time.Time +//go:generate go run github.com/vektah/dataloaden AudioPlayHistoryLoader int []time.Time +//go:generate go run github.com/vektah/dataloaden AudioLastPlayedLoader int *time.Time package loaders import ( @@ -52,6 +59,15 @@ type Loaders struct { SceneLastPlayed *SceneLastPlayedLoader SceneCustomFields *CustomFieldsLoader + AudioByID *AudioLoader + AudioFiles *AudioFileIDsLoader + AudioPlayCount *AudioPlayCountLoader + AudioOCount *AudioOCountLoader + AudioPlayHistory *AudioPlayHistoryLoader + AudioOHistory *AudioOHistoryLoader + AudioLastPlayed *AudioLastPlayedLoader + AudioCustomFields *CustomFieldsLoader + ImageFiles *ImageFileIDsLoader GalleryFiles *GalleryFileIDsLoader @@ -217,6 +233,32 @@ func (m Middleware) Middleware(next http.Handler) http.Handler { maxBatch: maxBatch, fetch: m.fetchScenesOHistory(ctx), }, + // Audio + AudioPlayCount: &AudioPlayCountLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchAudiosPlayCount(ctx), + }, + AudioOCount: &AudioOCountLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchAudiosOCount(ctx), + }, + AudioPlayHistory: &AudioPlayHistoryLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchAudiosPlayHistory(ctx), + }, + AudioLastPlayed: &AudioLastPlayedLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: 
m.fetchAudiosLastPlayed(ctx), + }, + AudioOHistory: &AudioOHistoryLoader{ + wait: wait, + maxBatch: maxBatch, + fetch: m.fetchAudiosOHistory(ctx), + }, } newCtx := context.WithValue(r.Context(), loadersCtxKey, ldrs) @@ -531,3 +573,59 @@ func (m Middleware) fetchScenesLastPlayed(ctx context.Context) func(keys []int) return ret, toErrorSlice(err) } } + +// Audio +func (m Middleware) fetchAudiosOCount(ctx context.Context) func(keys []int) ([]int, []error) { + return func(keys []int) (ret []int, errs []error) { + err := m.Repository.WithDB(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Audio.GetManyOCount(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchAudiosPlayCount(ctx context.Context) func(keys []int) ([]int, []error) { + return func(keys []int) (ret []int, errs []error) { + err := m.Repository.WithDB(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Audio.GetManyViewCount(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchAudiosOHistory(ctx context.Context) func(keys []int) ([][]time.Time, []error) { + return func(keys []int) (ret [][]time.Time, errs []error) { + err := m.Repository.WithDB(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Audio.GetManyODates(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchAudiosPlayHistory(ctx context.Context) func(keys []int) ([][]time.Time, []error) { + return func(keys []int) (ret [][]time.Time, errs []error) { + err := m.Repository.WithDB(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Audio.GetManyViewDates(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} + +func (m Middleware) fetchAudiosLastPlayed(ctx context.Context) func(keys []int) ([]*time.Time, []error) { + return func(keys []int) (ret []*time.Time, errs []error) { + err := 
m.Repository.WithDB(ctx, func(ctx context.Context) error { + var err error + ret, err = m.Repository.Audio.GetManyLastViewed(ctx, keys) + return err + }) + return ret, toErrorSlice(err) + } +} diff --git a/internal/api/models.go b/internal/api/models.go index 1c73466971..74fe478f93 100644 --- a/internal/api/models.go +++ b/internal/api/models.go @@ -75,6 +75,18 @@ func (f *VideoFile) Fingerprints() []models.Fingerprint { return f.VideoFile.Fingerprints } +type AudioFile struct { + *models.AudioFile +} + +func (AudioFile) IsBaseFile() {} + +func (AudioFile) IsVisualFile() {} + +func (f *AudioFile) Fingerprints() []models.Fingerprint { + return f.AudioFile.Fingerprints +} + type ImageFile struct { *models.ImageFile } diff --git a/internal/api/resolver.go b/internal/api/resolver.go index b1cec1c9dc..42d62c3576 100644 --- a/internal/api/resolver.go +++ b/internal/api/resolver.go @@ -35,6 +35,7 @@ type hookExecutor interface { type Resolver struct { repository models.Repository sceneService manager.SceneService + audioService manager.AudioService imageService manager.ImageService galleryService manager.GalleryService groupService manager.GroupService @@ -64,6 +65,9 @@ func (r *Resolver) Query() QueryResolver { func (r *Resolver) Scene() SceneResolver { return &sceneResolver{r} } +func (r *Resolver) Audio() AudioResolver { + return &audioResolver{r} +} func (r *Resolver) Image() ImageResolver { return &imageResolver{r} } @@ -93,6 +97,9 @@ func (r *Resolver) GalleryFile() GalleryFileResolver { func (r *Resolver) VideoFile() VideoFileResolver { return &videoFileResolver{r} } +func (r *Resolver) AudioFile() AudioFileResolver { + return &audioFileResolver{r} +} func (r *Resolver) ImageFile() ImageFileResolver { return &imageFileResolver{r} } @@ -121,6 +128,7 @@ type galleryChapterResolver struct{ *Resolver } type performerResolver struct{ *Resolver } type sceneResolver struct{ *Resolver } type sceneMarkerResolver struct{ *Resolver } +type audioResolver struct{ *Resolver } 
type imageResolver struct{ *Resolver } type studioResolver struct{ *Resolver } @@ -131,6 +139,7 @@ type movieResolver struct{ *groupResolver } type tagResolver struct{ *Resolver } type galleryFileResolver struct{ *Resolver } type videoFileResolver struct{ *Resolver } +type audioFileResolver struct{ *Resolver } type imageFileResolver struct{ *Resolver } type basicFileResolver struct{ *Resolver } type folderResolver struct{ *Resolver } diff --git a/internal/api/resolver_model_audio.go b/internal/api/resolver_model_audio.go new file mode 100644 index 0000000000..19d99fb191 --- /dev/null +++ b/internal/api/resolver_model_audio.go @@ -0,0 +1,350 @@ +// TODO(audio): update this file + +package api + +import ( + "context" + "fmt" + "time" + + "github.com/stashapp/stash/internal/api/loaders" + "github.com/stashapp/stash/internal/api/urlbuilders" + "github.com/stashapp/stash/internal/manager" + "github.com/stashapp/stash/pkg/models" +) + +func convertAudioFile(f models.File) (*models.AudioFile, error) { + vf, ok := f.(*models.AudioFile) + if !ok { + return nil, fmt.Errorf("file %T is not an audio file", f) + } + return vf, nil +} + +func (r *audioResolver) getPrimaryFile(ctx context.Context, obj *models.Audio) (*models.AudioFile, error) { + if obj.PrimaryFileID != nil { + f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID) + if err != nil { + return nil, err + } + + ret, err := convertAudioFile(f) + if err != nil { + return nil, err + } + + obj.Files.SetPrimary(ret) + + return ret, nil + } else { + _ = obj.LoadPrimaryFile(ctx, r.repository.File) + } + + return nil, nil +} + +func (r *audioResolver) getFiles(ctx context.Context, obj *models.Audio) ([]*models.AudioFile, error) { + fileIDs, err := loaders.From(ctx).AudioFiles.Load(obj.ID) + if err != nil { + return nil, err + } + + files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs) + err = firstError(errs) + if err != nil { + return nil, err + } + + ret := make([]*models.AudioFile, len(files)) + for i, f := 
range files { + ret[i], err = convertAudioFile(f) + if err != nil { + return nil, err + } + } + + obj.Files.Set(ret) + + return ret, nil +} + +func (r *audioResolver) Date(ctx context.Context, obj *models.Audio) (*string, error) { + if obj.Date != nil { + result := obj.Date.String() + return &result, nil + } + return nil, nil +} + +func (r *audioResolver) Files(ctx context.Context, obj *models.Audio) ([]*AudioFile, error) { + files, err := r.getFiles(ctx, obj) + if err != nil { + return nil, err + } + + ret := make([]*AudioFile, len(files)) + + for i, f := range files { + ret[i] = &AudioFile{ + AudioFile: f, + } + } + + return ret, nil +} + +func (r *audioResolver) Rating(ctx context.Context, obj *models.Audio) (*int, error) { + if obj.Rating != nil { + rating := models.Rating100To5(*obj.Rating) + return &rating, nil + } + return nil, nil +} + +func (r *audioResolver) Rating100(ctx context.Context, obj *models.Audio) (*int, error) { + return obj.Rating, nil +} + +func (r *audioResolver) Paths(ctx context.Context, obj *models.Audio) (*AudioPathsType, error) { + baseURL, _ := ctx.Value(BaseURLCtxKey).(string) + config := manager.GetInstance().Config + builder := urlbuilders.NewAudioURLBuilder(baseURL, obj) + screenshotPath := builder.GetScreenshotURL() + previewPath := builder.GetStreamPreviewURL() + streamPath := builder.GetStreamURL(config.GetAPIKey()).String() + webpPath := builder.GetStreamPreviewImageURL() + objHash := obj.GetHash(config.GetAudioFileNamingAlgorithm()) + vttPath := builder.GetSpriteVTTURL(objHash) + spritePath := builder.GetSpriteURL(objHash) + funscriptPath := builder.GetFunscriptURL() + captionBasePath := builder.GetCaptionURL() + + return &AudioPathsType{ + Screenshot: &screenshotPath, + Preview: &previewPath, + Stream: &streamPath, + Webp: &webpPath, + Vtt: &vttPath, + Sprite: &spritePath, + Funscript: &funscriptPath, + Caption: &captionBasePath, + }, nil +} + +// TODO(audio|AudioCaption): need to update IF AudioCaption required +func (r 
*audioResolver) Captions(ctx context.Context, obj *models.Audio) (ret []*models.VideoCaption, err error) { + primaryFile, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + if primaryFile == nil { + return nil, nil + } + + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.File.GetCaptions(ctx, primaryFile.Base().ID) + return err + }); err != nil { + return nil, err + } + + return ret, err +} + +func (r *audioResolver) Galleries(ctx context.Context, obj *models.Audio) (ret []*models.Gallery, err error) { + if !obj.GalleryIDs.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + return obj.LoadGalleryIDs(ctx, r.repository.Audio) + }); err != nil { + return nil, err + } + } + + var errs []error + ret, errs = loaders.From(ctx).GalleryByID.LoadAll(obj.GalleryIDs.List()) + return ret, firstError(errs) +} + +func (r *audioResolver) Studio(ctx context.Context, obj *models.Audio) (ret *models.Studio, err error) { + if obj.StudioID == nil { + return nil, nil + } + + return loaders.From(ctx).StudioByID.Load(*obj.StudioID) +} + +func (r *audioResolver) Groups(ctx context.Context, obj *models.Audio) (ret []*AudioGroup, err error) { + if !obj.Groups.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + return obj.LoadGroups(ctx, qb) + }); err != nil { + return nil, err + } + } + + loader := loaders.From(ctx).GroupByID + + for _, sm := range obj.Groups.List() { + group, err := loader.Load(sm.GroupID) + if err != nil { + return nil, err + } + + audioIdx := sm.AudioIndex + audioGroup := &AudioGroup{ + Group: group, + AudioIndex: audioIdx, + } + + ret = append(ret, audioGroup) + } + + return ret, nil +} + +func (r *audioResolver) Tags(ctx context.Context, obj *models.Audio) (ret []*models.Tag, err error) { + if !obj.TagIDs.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + return obj.LoadTagIDs(ctx, 
r.repository.Audio) + }); err != nil { + return nil, err + } + } + + var errs []error + ret, errs = loaders.From(ctx).TagByID.LoadAll(obj.TagIDs.List()) + return ret, firstError(errs) +} + +func (r *audioResolver) Performers(ctx context.Context, obj *models.Audio) (ret []*models.Performer, err error) { + if !obj.PerformerIDs.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + return obj.LoadPerformerIDs(ctx, r.repository.Audio) + }); err != nil { + return nil, err + } + } + + var errs []error + ret, errs = loaders.From(ctx).PerformerByID.LoadAll(obj.PerformerIDs.List()) + return ret, firstError(errs) +} + +func (r *audioResolver) AudioStreams(ctx context.Context, obj *models.Audio) ([]*manager.AudioStreamEndpoint, error) { + // load the primary file into the audio + _, err := r.getPrimaryFile(ctx, obj) + if err != nil { + return nil, err + } + + config := manager.GetInstance().Config + + baseURL, _ := ctx.Value(BaseURLCtxKey).(string) + builder := urlbuilders.NewAudioURLBuilder(baseURL, obj) + apiKey := config.GetAPIKey() + + return manager.GetAudioStreamPaths(obj, builder.GetStreamURL(apiKey), config.GetMaxStreamingTranscodeSize()) +} + +func (r *audioResolver) URL(ctx context.Context, obj *models.Audio) (*string, error) { + if !obj.URLs.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + return obj.LoadURLs(ctx, r.repository.Audio) + }); err != nil { + return nil, err + } + } + + urls := obj.URLs.List() + if len(urls) == 0 { + return nil, nil + } + + return &urls[0], nil +} + +func (r *audioResolver) Urls(ctx context.Context, obj *models.Audio) ([]string, error) { + if !obj.URLs.Loaded() { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + return obj.LoadURLs(ctx, r.repository.Audio) + }); err != nil { + return nil, err + } + } + + return obj.URLs.List(), nil +} + +func (r *audioResolver) OCounter(ctx context.Context, obj *models.Audio) (*int, error) { + ret, err := 
loaders.From(ctx).AudioOCount.Load(obj.ID) + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (r *audioResolver) LastPlayedAt(ctx context.Context, obj *models.Audio) (*time.Time, error) { + ret, err := loaders.From(ctx).AudioLastPlayed.Load(obj.ID) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (r *audioResolver) PlayCount(ctx context.Context, obj *models.Audio) (*int, error) { + ret, err := loaders.From(ctx).AudioPlayCount.Load(obj.ID) + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (r *audioResolver) PlayHistory(ctx context.Context, obj *models.Audio) ([]*time.Time, error) { + ret, err := loaders.From(ctx).AudioPlayHistory.Load(obj.ID) + if err != nil { + return nil, err + } + + // convert to pointer slice + ptrRet := make([]*time.Time, len(ret)) + for i, t := range ret { + tt := t + ptrRet[i] = &tt + } + + return ptrRet, nil +} + +func (r *audioResolver) OHistory(ctx context.Context, obj *models.Audio) ([]*time.Time, error) { + ret, err := loaders.From(ctx).AudioOHistory.Load(obj.ID) + if err != nil { + return nil, err + } + + // convert to pointer slice + ptrRet := make([]*time.Time, len(ret)) + for i, t := range ret { + tt := t + ptrRet[i] = &tt + } + + return ptrRet, nil +} + +func (r *audioResolver) CustomFields(ctx context.Context, obj *models.Audio) (map[string]interface{}, error) { + m, err := loaders.From(ctx).AudioCustomFields.Load(obj.ID) + if err != nil { + return nil, err + } + + if m == nil { + return make(map[string]interface{}), nil + } + + return m, nil +} diff --git a/internal/api/resolver_model_file.go b/internal/api/resolver_model_file.go index 4b9995311f..545637eb52 100644 --- a/internal/api/resolver_model_file.go +++ b/internal/api/resolver_model_file.go @@ -28,6 +28,10 @@ func (r *videoFileResolver) Fingerprint(ctx context.Context, obj *VideoFile, typ return fingerprintResolver(obj.VideoFile.Fingerprints, type_) } +func (r *audioFileResolver) Fingerprint(ctx 
context.Context, obj *AudioFile, type_ string) (*string, error) { + return fingerprintResolver(obj.AudioFile.Fingerprints, type_) +} + func (r *basicFileResolver) Fingerprint(ctx context.Context, obj *BasicFile, type_ string) (*string, error) { return fingerprintResolver(obj.BaseFile.Fingerprints, type_) } @@ -43,6 +47,9 @@ func (r *imageFileResolver) ParentFolder(ctx context.Context, obj *ImageFile) (* func (r *videoFileResolver) ParentFolder(ctx context.Context, obj *VideoFile) (*models.Folder, error) { return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID) } +func (r *audioFileResolver) ParentFolder(ctx context.Context, obj *AudioFile) (*models.Folder, error) { + return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID) +} func (r *basicFileResolver) ParentFolder(ctx context.Context, obj *BasicFile) (*models.Folder, error) { return loaders.From(ctx).FolderByID.Load(obj.ParentFolderID) @@ -74,6 +81,9 @@ func (r *imageFileResolver) ZipFile(ctx context.Context, obj *ImageFile) (*Basic func (r *videoFileResolver) ZipFile(ctx context.Context, obj *VideoFile) (*BasicFile, error) { return zipFileResolver(ctx, obj.ZipFileID) } +func (r *audioFileResolver) ZipFile(ctx context.Context, obj *AudioFile) (*BasicFile, error) { + return zipFileResolver(ctx, obj.ZipFileID) +} func (r *basicFileResolver) ZipFile(ctx context.Context, obj *BasicFile) (*BasicFile, error) { return zipFileResolver(ctx, obj.ZipFileID) diff --git a/internal/api/resolver_mutation_audio.go b/internal/api/resolver_mutation_audio.go new file mode 100644 index 0000000000..9e0126a413 --- /dev/null +++ b/internal/api/resolver_mutation_audio.go @@ -0,0 +1,918 @@ +// TODO(audio): update this file + +package api + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/stashapp/stash/internal/manager" + "github.com/stashapp/stash/pkg/audio" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + 
"github.com/stashapp/stash/pkg/plugin" + "github.com/stashapp/stash/pkg/plugin/hook" + "github.com/stashapp/stash/pkg/sliceutil" + "github.com/stashapp/stash/pkg/sliceutil/stringslice" + "github.com/stashapp/stash/pkg/utils" +) + +// used to refetch audio after hooks run +func (r *mutationResolver) getAudio(ctx context.Context, id int) (ret *models.Audio, err error) { + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Audio.Find(ctx, id) + return err + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *mutationResolver) AudioCreate(ctx context.Context, input models.AudioCreateInput) (ret *models.Audio, err error) { + translator := changesetTranslator{ + inputMap: getUpdateInputMap(ctx), + } + + fileIDs, err := translator.fileIDSliceFromStringSlice(input.FileIds) + if err != nil { + return nil, fmt.Errorf("converting file ids: %w", err) + } + + // Populate a new audio from the input + newAudio := models.NewAudio() + + newAudio.Title = translator.string(input.Title) + newAudio.Code = translator.string(input.Code) + newAudio.Details = translator.string(input.Details) + newAudio.Rating = input.Rating100 + newAudio.Organized = translator.bool(input.Organized) + + newAudio.Date, err = translator.datePtr(input.Date) + if err != nil { + return nil, fmt.Errorf("converting date: %w", err) + } + newAudio.StudioID, err = translator.intPtrFromString(input.StudioID) + if err != nil { + return nil, fmt.Errorf("converting studio id: %w", err) + } + + if input.Urls != nil { + newAudio.URLs = models.NewRelatedStrings(stringslice.TrimSpace(input.Urls)) + } else if input.URL != nil { + newAudio.URLs = models.NewRelatedStrings([]string{strings.TrimSpace(*input.URL)}) + } + + newAudio.PerformerIDs, err = translator.relatedIds(input.PerformerIds) + if err != nil { + return nil, fmt.Errorf("converting performer ids: %w", err) + } + newAudio.TagIDs, err = translator.relatedIds(input.TagIds) + if err != nil { + return nil, 
fmt.Errorf("converting tag ids: %w", err) + } + + if len(input.Groups) > 0 { + newAudio.Groups, err = translator.relatedGroupsAudio(input.Groups) + if err != nil { + return nil, fmt.Errorf("converting groups: %w", err) + } + } + + var coverImageData []byte + if input.CoverImage != nil { + var err error + coverImageData, err = utils.ProcessImageInput(ctx, *input.CoverImage) + if err != nil { + return nil, fmt.Errorf("processing cover image: %w", err) + } + } + + customFields := convertMapJSONNumbers(input.CustomFields) + + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.Resolver.audioService.Create(ctx, models.CreateAudioInput{ + Audio: &newAudio, + FileIDs: fileIDs, + CoverImage: coverImageData, + CustomFields: customFields, + }) + return err + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *mutationResolver) AudioUpdate(ctx context.Context, input models.AudioUpdateInput) (ret *models.Audio, err error) { + translator := changesetTranslator{ + inputMap: getUpdateInputMap(ctx), + } + + // Start the transaction and save the audio + if err := r.withTxn(ctx, func(ctx context.Context) error { + ret, err = r.audioUpdate(ctx, input, translator) + return err + }); err != nil { + return nil, err + } + + r.hookExecutor.ExecutePostHooks(ctx, ret.ID, hook.AudioUpdatePost, input, translator.getFields()) + return r.getAudio(ctx, ret.ID) +} + +func (r *mutationResolver) AudiosUpdate(ctx context.Context, input []*models.AudioUpdateInput) (ret []*models.Audio, err error) { + inputMaps := getUpdateInputMaps(ctx) + + // Start the transaction and save the audios + if err := r.withTxn(ctx, func(ctx context.Context) error { + for i, audio := range input { + translator := changesetTranslator{ + inputMap: inputMaps[i], + } + + thisAudio, err := r.audioUpdate(ctx, *audio, translator) + if err != nil { + return err + } + + ret = append(ret, thisAudio) + } + + return nil + }); err != nil { + return nil, err + } + + // execute post hooks 
outside of txn + var newRet []*models.Audio + for i, audio := range ret { + translator := changesetTranslator{ + inputMap: inputMaps[i], + } + + r.hookExecutor.ExecutePostHooks(ctx, audio.ID, hook.AudioUpdatePost, input, translator.getFields()) + + audio, err = r.getAudio(ctx, audio.ID) + if err != nil { + return nil, err + } + + newRet = append(newRet, audio) + } + + return newRet, nil +} + +func audioPartialFromInput(input models.AudioUpdateInput, translator changesetTranslator) (*models.AudioPartial, error) { + updatedAudio := models.NewAudioPartial() + + updatedAudio.Title = translator.optionalString(input.Title, "title") + updatedAudio.Code = translator.optionalString(input.Code, "code") + updatedAudio.Details = translator.optionalString(input.Details, "details") + updatedAudio.Rating = translator.optionalInt(input.Rating100, "rating100") + + if input.OCounter != nil { + logger.Warnf("o_counter is deprecated and no longer supported, use audioIncrementO/audioDecrementO instead") + } + + if input.PlayCount != nil { + logger.Warnf("play_count is deprecated and no longer supported, use audioIncrementPlayCount/audioDecrementPlayCount instead") + } + + updatedAudio.PlayDuration = translator.optionalFloat64(input.PlayDuration, "play_duration") + updatedAudio.Organized = translator.optionalBool(input.Organized, "organized") + + var err error + + updatedAudio.Date, err = translator.optionalDate(input.Date, "date") + if err != nil { + return nil, fmt.Errorf("converting date: %w", err) + } + updatedAudio.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id") + if err != nil { + return nil, fmt.Errorf("converting studio id: %w", err) + } + + updatedAudio.URLs = translator.optionalURLs(input.Urls, input.URL) + + updatedAudio.PrimaryFileID, err = translator.fileIDPtrFromString(input.PrimaryFileID) + if err != nil { + return nil, fmt.Errorf("converting primary file id: %w", err) + } + + updatedAudio.PerformerIDs, err = 
translator.updateIds(input.PerformerIds, "performer_ids") + if err != nil { + return nil, fmt.Errorf("converting performer ids: %w", err) + } + updatedAudio.TagIDs, err = translator.updateIds(input.TagIds, "tag_ids") + if err != nil { + return nil, fmt.Errorf("converting tag ids: %w", err) + } + + if translator.hasField("groups") { + updatedAudio.GroupIDs, err = translator.updateGroupIDsAudio(input.Groups, "groups") + if err != nil { + return nil, fmt.Errorf("converting groups: %w", err) + } + } + + return &updatedAudio, nil +} + +func (r *mutationResolver) audioUpdate(ctx context.Context, input models.AudioUpdateInput, translator changesetTranslator) (*models.Audio, error) { + audioID, err := strconv.Atoi(input.ID) + if err != nil { + return nil, fmt.Errorf("converting id: %w", err) + } + + qb := r.repository.Audio + + originalAudio, err := qb.Find(ctx, audioID) + if err != nil { + return nil, err + } + + if originalAudio == nil { + return nil, fmt.Errorf("audio with id %d not found", audioID) + } + + // Populate audio from the input + updatedAudio, err := audioPartialFromInput(input, translator) + if err != nil { + return nil, err + } + + // ensure that title is set where audio has no file + if updatedAudio.Title.Set && updatedAudio.Title.Value == "" { + if err := originalAudio.LoadFiles(ctx, r.repository.Audio); err != nil { + return nil, err + } + + if len(originalAudio.Files.List()) == 0 { + return nil, errors.New("title must be set if audio has no files") + } + } + + if updatedAudio.PrimaryFileID != nil { + newPrimaryFileID := *updatedAudio.PrimaryFileID + + // if file hash has changed, we should migrate generated files + // after commit + if err := originalAudio.LoadFiles(ctx, r.repository.Audio); err != nil { + return nil, err + } + + // ensure that new primary file is associated with audio + var f *models.AudioFile + for _, ff := range originalAudio.Files.List() { + if ff.ID == newPrimaryFileID { + f = ff + } + } + + if f == nil { + return nil, 
fmt.Errorf("file with id %d not associated with audio", newPrimaryFileID) + } + } + + var coverImageData []byte + coverImageIncluded := translator.hasField("cover_image") + if input.CoverImage != nil { + var err error + coverImageData, err = utils.ProcessImageInput(ctx, *input.CoverImage) + if err != nil { + return nil, fmt.Errorf("processing cover image: %w", err) + } + } + + var customFields *models.CustomFieldsInput + if input.CustomFields != nil { + cfCopy := *input.CustomFields + customFields = &cfCopy + // convert json.Numbers to int/float + customFields.Full = convertMapJSONNumbers(customFields.Full) + customFields.Partial = convertMapJSONNumbers(customFields.Partial) + } + + audio, err := qb.UpdatePartial(ctx, audioID, *updatedAudio) + if err != nil { + return nil, err + } + + if coverImageIncluded { + if err := r.audioUpdateCoverImage(ctx, audio, coverImageData); err != nil { + return nil, err + } + } + + if customFields != nil { + if err := qb.SetCustomFields(ctx, audio.ID, *customFields); err != nil { + return nil, err + } + } + + return audio, nil +} + +func (r *mutationResolver) audioUpdateCoverImage(ctx context.Context, s *models.Audio, coverImageData []byte) error { + qb := r.repository.Audio + + // update cover table - empty data will clear the cover + if err := qb.UpdateCover(ctx, s.ID, coverImageData); err != nil { + return err + } + + return nil +} + +func (r *mutationResolver) BulkAudioUpdate(ctx context.Context, input BulkAudioUpdateInput) ([]*models.Audio, error) { + audioIDs, err := stringslice.StringSliceToIntSlice(input.Ids) + if err != nil { + return nil, fmt.Errorf("converting ids: %w", err) + } + + translator := changesetTranslator{ + inputMap: getUpdateInputMap(ctx), + } + + // Populate audio from the input + updatedAudio := models.NewAudioPartial() + + updatedAudio.Title = translator.optionalString(input.Title, "title") + updatedAudio.Code = translator.optionalString(input.Code, "code") + updatedAudio.Details = 
translator.optionalString(input.Details, "details") + updatedAudio.Rating = translator.optionalInt(input.Rating100, "rating100") + updatedAudio.Organized = translator.optionalBool(input.Organized, "organized") + + updatedAudio.Date, err = translator.optionalDate(input.Date, "date") + if err != nil { + return nil, fmt.Errorf("converting date: %w", err) + } + updatedAudio.StudioID, err = translator.optionalIntFromString(input.StudioID, "studio_id") + if err != nil { + return nil, fmt.Errorf("converting studio id: %w", err) + } + + updatedAudio.URLs = translator.optionalURLsBulk(input.Urls, input.URL) + + updatedAudio.PerformerIDs, err = translator.updateIdsBulk(input.PerformerIds, "performer_ids") + if err != nil { + return nil, fmt.Errorf("converting performer ids: %w", err) + } + updatedAudio.TagIDs, err = translator.updateIdsBulk(input.TagIds, "tag_ids") + if err != nil { + return nil, fmt.Errorf("converting tag ids: %w", err) + } + + if translator.hasField("group_ids") { + updatedAudio.GroupIDs, err = translator.updateGroupIDsBulkAudio(input.GroupIds, "group_ids") + if err != nil { + return nil, fmt.Errorf("converting group ids: %w", err) + } + } + + var customFields *models.CustomFieldsInput + if input.CustomFields != nil { + cf := handleUpdateCustomFields(*input.CustomFields) + customFields = &cf + } + + ret := []*models.Audio{} + + // Start the transaction and save the audios + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + for _, audioID := range audioIDs { + audio, err := qb.UpdatePartial(ctx, audioID, updatedAudio) + if err != nil { + return err + } + + if customFields != nil { + if err := qb.SetCustomFields(ctx, audio.ID, *customFields); err != nil { + return err + } + } + + ret = append(ret, audio) + } + + return nil + }); err != nil { + return nil, err + } + + // execute post hooks outside of txn + var newRet []*models.Audio + for _, audio := range ret { + r.hookExecutor.ExecutePostHooks(ctx, audio.ID, 
hook.AudioUpdatePost, input, translator.getFields()) + + audio, err = r.getAudio(ctx, audio.ID) + if err != nil { + return nil, err + } + + newRet = append(newRet, audio) + } + + return newRet, nil +} + +func (r *mutationResolver) AudioDestroy(ctx context.Context, input models.AudioDestroyInput) (bool, error) { + audioID, err := strconv.Atoi(input.ID) + if err != nil { + return false, fmt.Errorf("converting id: %w", err) + } + + fileNamingAlgo := manager.GetInstance().Config.GetAudioFileNamingAlgorithm() + trashPath := manager.GetInstance().Config.GetDeleteTrashPath() + + var s *models.Audio + fileDeleter := &audio.FileDeleter{ + Deleter: file.NewDeleterWithTrash(trashPath), + FileNamingAlgo: fileNamingAlgo, + Paths: manager.GetInstance().Paths, + } + + deleteGenerated := utils.IsTrue(input.DeleteGenerated) + deleteFile := utils.IsTrue(input.DeleteFile) + destroyFileEntry := utils.IsTrue(input.DestroyFileEntry) + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + var err error + s, err = qb.Find(ctx, audioID) + if err != nil { + return err + } + + if s == nil { + return fmt.Errorf("audio with id %d not found", audioID) + } + + // kill any running encoders + manager.KillRunningStreamsAudio(s, fileNamingAlgo) + + return r.audioService.Destroy(ctx, s, fileDeleter, deleteGenerated, deleteFile, destroyFileEntry) + }); err != nil { + fileDeleter.Rollback() + return false, err + } + + // perform the post-commit actions + fileDeleter.Commit() + + // call post hook after performing the other actions + r.hookExecutor.ExecutePostHooks(ctx, s.ID, hook.AudioDestroyPost, plugin.AudioDestroyInput{ + AudioDestroyInput: input, + Checksum: s.Checksum, + OSHash: s.OSHash, + Path: s.Path, + }, nil) + + return true, nil +} + +func (r *mutationResolver) AudiosDestroy(ctx context.Context, input models.AudiosDestroyInput) (bool, error) { + audioIDs, err := stringslice.StringSliceToIntSlice(input.Ids) + if err != nil { + return false, 
fmt.Errorf("converting ids: %w", err) + } + + var audios []*models.Audio + fileNamingAlgo := manager.GetInstance().Config.GetAudioFileNamingAlgorithm() + trashPath := manager.GetInstance().Config.GetDeleteTrashPath() + + fileDeleter := &audio.FileDeleter{ + Deleter: file.NewDeleterWithTrash(trashPath), + FileNamingAlgo: fileNamingAlgo, + Paths: manager.GetInstance().Paths, + } + + deleteGenerated := utils.IsTrue(input.DeleteGenerated) + deleteFile := utils.IsTrue(input.DeleteFile) + destroyFileEntry := utils.IsTrue(input.DestroyFileEntry) + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + for _, id := range audioIDs { + audio, err := qb.Find(ctx, id) + if err != nil { + return err + } + if audio == nil { + return fmt.Errorf("audio with id %d not found", id) + } + + audios = append(audios, audio) + + // kill any running encoders + manager.KillRunningStreamsAudio(audio, fileNamingAlgo) + + if err := r.audioService.Destroy(ctx, audio, fileDeleter, deleteGenerated, deleteFile, destroyFileEntry); err != nil { + return err + } + } + + return nil + }); err != nil { + fileDeleter.Rollback() + return false, err + } + + // perform the post-commit actions + fileDeleter.Commit() + + for _, audio := range audios { + // call post hook after performing the other actions + r.hookExecutor.ExecutePostHooks(ctx, audio.ID, hook.AudioDestroyPost, plugin.AudiosDestroyInput{ + AudiosDestroyInput: input, + Checksum: audio.Checksum, + OSHash: audio.OSHash, + Path: audio.Path, + }, nil) + } + + return true, nil +} + +func (r *mutationResolver) AudioAssignFile(ctx context.Context, input AssignAudioFileInput) (bool, error) { + audioID, err := strconv.Atoi(input.AudioID) + if err != nil { + return false, fmt.Errorf("converting audio id: %w", err) + } + + fileID, err := strconv.Atoi(input.FileID) + if err != nil { + return false, fmt.Errorf("converting file id: %w", err) + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + return 
r.Resolver.audioService.AssignFile(ctx, audioID, models.FileID(fileID)) + }); err != nil { + return false, fmt.Errorf("assigning file to audio: %w", err) + } + + return true, nil +} + +func (r *mutationResolver) AudioMerge(ctx context.Context, input AudioMergeInput) (*models.Audio, error) { + srcIDs, err := stringslice.StringSliceToIntSlice(input.Source) + if err != nil { + return nil, fmt.Errorf("converting source ids: %w", err) + } + + destID, err := strconv.Atoi(input.Destination) + if err != nil { + return nil, fmt.Errorf("converting destination id: %w", err) + } + + var values *models.AudioPartial + var coverImageData []byte + var customFields *models.CustomFieldsInput + + if input.Values != nil { + translator := changesetTranslator{ + inputMap: getNamedUpdateInputMap(ctx, "input.values"), + } + + values, err = audioPartialFromInput(*input.Values, translator) + if err != nil { + return nil, err + } + + if input.Values.CoverImage != nil { + var err error + coverImageData, err = utils.ProcessImageInput(ctx, *input.Values.CoverImage) + if err != nil { + return nil, fmt.Errorf("processing cover image: %w", err) + } + } + + if input.Values.CustomFields != nil { + cf := handleUpdateCustomFields(*input.Values.CustomFields) + customFields = &cf + } + } else { + v := models.NewAudioPartial() + values = &v + } + + mgr := manager.GetInstance() + trashPath := mgr.Config.GetDeleteTrashPath() + fileDeleter := &audio.FileDeleter{ + Deleter: file.NewDeleterWithTrash(trashPath), + FileNamingAlgo: mgr.Config.GetAudioFileNamingAlgorithm(), + Paths: mgr.Paths, + } + + var ret *models.Audio + if err := r.withTxn(ctx, func(ctx context.Context) error { + if err := r.Resolver.audioService.Merge(ctx, srcIDs, destID, fileDeleter, audio.MergeOptions{ + AudioPartial: *values, + IncludePlayHistory: utils.IsTrue(input.PlayHistory), + IncludeOHistory: utils.IsTrue(input.OHistory), + }); err != nil { + return err + } + + ret, err = r.Resolver.repository.Audio.Find(ctx, destID) + if err != 
nil { + return err + } + if ret == nil { + return fmt.Errorf("audio with id %d not found", destID) + } + + // only update cover image if one was provided + if len(coverImageData) > 0 { + if err := r.audioUpdateCoverImage(ctx, ret, coverImageData); err != nil { + return err + } + } + + if customFields != nil { + if err := r.Resolver.repository.Audio.SetCustomFields(ctx, ret.ID, *customFields); err != nil { + return err + } + } + + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *mutationResolver) AudioSaveActivity(ctx context.Context, id string, resumeTime *float64, playDuration *float64) (ret bool, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return false, fmt.Errorf("converting id: %w", err) + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + ret, err = qb.SaveActivity(ctx, audioID, resumeTime, playDuration) + return err + }); err != nil { + return false, err + } + + return ret, nil +} + +func (r *mutationResolver) AudioResetActivity(ctx context.Context, id string, resetResume *bool, resetDuration *bool) (ret bool, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return false, fmt.Errorf("converting id: %w", err) + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + ret, err = qb.ResetActivity(ctx, audioID, utils.IsTrue(resetResume), utils.IsTrue(resetDuration)) + return err + }); err != nil { + return false, err + } + + return ret, nil +} + +// deprecated +func (r *mutationResolver) AudioIncrementPlayCount(ctx context.Context, id string) (ret int, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return 0, fmt.Errorf("converting id: %w", err) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.AddViews(ctx, audioID, nil) + return err + }); err != nil { + return 0, err + } + + 
return len(updatedTimes), nil +} + +func (r *mutationResolver) AudioAddPlay(ctx context.Context, id string, t []*time.Time) (*HistoryMutationResult, error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("converting id: %w", err) + } + + var times []time.Time + + // convert time to local time, so that sorting is consistent + for _, tt := range t { + times = append(times, tt.Local()) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.AddViews(ctx, audioID, times) + return err + }); err != nil { + return nil, err + } + + return &HistoryMutationResult{ + Count: len(updatedTimes), + History: sliceutil.ValuesToPtrs(updatedTimes), + }, nil +} + +func (r *mutationResolver) AudioDeletePlay(ctx context.Context, id string, t []*time.Time) (*HistoryMutationResult, error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return nil, err + } + + var times []time.Time + + for _, tt := range t { + times = append(times, *tt) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.DeleteViews(ctx, audioID, times) + return err + }); err != nil { + return nil, err + } + + return &HistoryMutationResult{ + Count: len(updatedTimes), + History: sliceutil.ValuesToPtrs(updatedTimes), + }, nil +} + +func (r *mutationResolver) AudioResetPlayCount(ctx context.Context, id string) (ret int, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return 0, err + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + ret, err = qb.DeleteAllViews(ctx, audioID) + return err + }); err != nil { + return 0, err + } + + return ret, nil +} + +// deprecated +func (r *mutationResolver) AudioIncrementO(ctx context.Context, id string) (ret int, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return 
0, fmt.Errorf("converting id: %w", err) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.AddO(ctx, audioID, nil) + return err + }); err != nil { + return 0, err + } + + return len(updatedTimes), nil +} + +// deprecated +func (r *mutationResolver) AudioDecrementO(ctx context.Context, id string) (ret int, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return 0, fmt.Errorf("converting id: %w", err) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.DeleteO(ctx, audioID, nil) + return err + }); err != nil { + return 0, err + } + + return len(updatedTimes), nil +} + +func (r *mutationResolver) AudioResetO(ctx context.Context, id string) (ret int, err error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return 0, fmt.Errorf("converting id: %w", err) + } + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + ret, err = qb.ResetO(ctx, audioID) + return err + }); err != nil { + return 0, err + } + + return ret, nil +} + +func (r *mutationResolver) AudioAddO(ctx context.Context, id string, t []*time.Time) (*HistoryMutationResult, error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("converting id: %w", err) + } + + var times []time.Time + + // convert time to local time, so that sorting is consistent + for _, tt := range t { + times = append(times, tt.Local()) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.AddO(ctx, audioID, times) + return err + }); err != nil { + return nil, err + } + + return &HistoryMutationResult{ + Count: len(updatedTimes), + History: sliceutil.ValuesToPtrs(updatedTimes), + }, nil +} + +func (r *mutationResolver) AudioDeleteO(ctx 
context.Context, id string, t []*time.Time) (*HistoryMutationResult, error) { + audioID, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("converting id: %w", err) + } + + var times []time.Time + + for _, tt := range t { + times = append(times, *tt) + } + + var updatedTimes []time.Time + + if err := r.withTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + + updatedTimes, err = qb.DeleteO(ctx, audioID, times) + return err + }); err != nil { + return nil, err + } + + return &HistoryMutationResult{ + Count: len(updatedTimes), + History: sliceutil.ValuesToPtrs(updatedTimes), + }, nil +} + +func (r *mutationResolver) AudioGenerateScreenshot(ctx context.Context, id string, at *float64) (string, error) { + if at != nil { + manager.GetInstance().GenerateScreenshot(ctx, id, *at) + } else { + manager.GetInstance().GenerateDefaultScreenshot(ctx, id) + } + + return "todo", nil +} diff --git a/internal/api/resolver_query_audio.go b/internal/api/resolver_query_audio.go new file mode 100644 index 0000000000..55d8682b17 --- /dev/null +++ b/internal/api/resolver_query_audio.go @@ -0,0 +1,45 @@ +package api + +import ( + "context" + "fmt" + "strconv" + + "github.com/stashapp/stash/internal/api/urlbuilders" + "github.com/stashapp/stash/internal/manager" + "github.com/stashapp/stash/pkg/models" +) + +func (r *queryResolver) AudioStreams(ctx context.Context, id *string) ([]*manager.AudioStreamEndpoint, error) { + audioID, err := strconv.Atoi(*id) + if err != nil { + return nil, err + } + + // find the audio + var audio *models.Audio + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + var err error + audio, err = r.repository.Audio.Find(ctx, audioID) + + if audio != nil { + err = audio.LoadPrimaryFile(ctx, r.repository.File) + } + + return err + }); err != nil { + return nil, err + } + + if audio == nil { + return nil, fmt.Errorf("audio with id %d not found", audioID) + } + + config := manager.GetInstance().Config + + baseURL, _ := 
ctx.Value(BaseURLCtxKey).(string) + builder := urlbuilders.NewAudioURLBuilder(baseURL, audio) + apiKey := config.GetAPIKey() + + return manager.GetAudioStreamPaths(audio, builder.GetStreamURL(apiKey), config.GetMaxStreamingTranscodeSize()) +} diff --git a/internal/api/resolver_query_find_audio.go b/internal/api/resolver_query_find_audio.go new file mode 100644 index 0000000000..8fd8976bba --- /dev/null +++ b/internal/api/resolver_query_find_audio.go @@ -0,0 +1,260 @@ +// TODO(audio): update this file + +package api + +import ( + "context" + "slices" + "strconv" + + "github.com/99designs/gqlgen/graphql" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/audio" +) + +func (r *queryResolver) FindAudio(ctx context.Context, id *string, checksum *string) (*models.Audio, error) { + var audio *models.Audio + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + var err error + if id != nil { + idInt, err := strconv.Atoi(*id) + if err != nil { + return err + } + audio, err = qb.Find(ctx, idInt) + if err != nil { + return err + } + } else if checksum != nil { + var audios []*models.Audio + audios, err = qb.FindByChecksum(ctx, *checksum) + if len(audios) > 0 { + audio = audios[0] + } + } + + return err + }); err != nil { + return nil, err + } + + return audio, nil +} + +func (r *queryResolver) FindAudioByHash(ctx context.Context, input AudioHashInput) (*models.Audio, error) { + var audio *models.Audio + + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + qb := r.repository.Audio + if input.Checksum != nil { + audios, err := qb.FindByChecksum(ctx, *input.Checksum) + if err != nil { + return err + } + if len(audios) > 0 { + audio = audios[0] + } + } + + if audio == nil && input.Oshash != nil { + audios, err := qb.FindByOSHash(ctx, *input.Oshash) + if err != nil { + return err + } + if len(audios) > 0 { + audio = audios[0] + } + } + + return nil + }); err != nil { + return nil, err + } + + return 
audio, nil +} + +func (r *queryResolver) FindAudios( + ctx context.Context, + audioFilter *models.AudioFilterType, + audioIDs []int, + ids []string, + filter *models.FindFilterType, +) (ret *FindAudiosResultType, err error) { + if len(ids) > 0 { + audioIDs, err = handleIDList(ids, "ids") + if err != nil { + return nil, err + } + } + + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + var audios []*models.Audio + var err error + + fields := graphql.CollectAllFields(ctx) + result := &models.AudioQueryResult{} + + if len(audioIDs) > 0 { + audios, err = r.repository.Audio.FindMany(ctx, audioIDs) + if err == nil { + result.Count = len(audios) + for _, s := range audios { + if err = s.LoadPrimaryFile(ctx, r.repository.File); err != nil { + break + } + + f := s.Files.Primary() + if f == nil { + continue + } + + result.TotalDuration += f.Duration + + result.TotalSize += float64(f.Size) + } + } + } else { + result, err = r.repository.Audio.Query(ctx, models.AudioQueryOptions{ + QueryOptions: models.QueryOptions{ + FindFilter: filter, + Count: slices.Contains(fields, "count"), + }, + AudioFilter: audioFilter, + TotalDuration: slices.Contains(fields, "duration"), + TotalSize: slices.Contains(fields, "filesize"), + }) + if err == nil { + audios, err = result.Resolve(ctx) + } + } + + if err != nil { + return err + } + + ret = &FindAudiosResultType{ + Count: result.Count, + Audios: audios, + Duration: result.TotalDuration, + Filesize: result.TotalSize, + } + + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *queryResolver) FindAudiosByPathRegex(ctx context.Context, filter *models.FindFilterType) (ret *FindAudiosResultType, err error) { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + + audioFilter := &models.AudioFilterType{} + + if filter != nil && filter.Q != nil { + audioFilter.Path = &models.StringCriterionInput{ + Modifier: models.CriterionModifierMatchesRegex, + Value: "(?i)" + *filter.Q, + } + } + + // 
make a copy of the filter if provided, nilling out Q + var queryFilter *models.FindFilterType + if filter != nil { + f := *filter + queryFilter = &f + queryFilter.Q = nil + } + + fields := graphql.CollectAllFields(ctx) + + result, err := r.repository.Audio.Query(ctx, models.AudioQueryOptions{ + QueryOptions: models.QueryOptions{ + FindFilter: queryFilter, + Count: slices.Contains(fields, "count"), + }, + AudioFilter: audioFilter, + TotalDuration: slices.Contains(fields, "duration"), + TotalSize: slices.Contains(fields, "filesize"), + }) + if err != nil { + return err + } + + audios, err := result.Resolve(ctx) + if err != nil { + return err + } + + ret = &FindAudiosResultType{ + Count: result.Count, + Audios: audios, + Duration: result.TotalDuration, + Filesize: result.TotalSize, + } + + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *queryResolver) ParseAudioFilenames(ctx context.Context, filter *models.FindFilterType, config models.AudioParserInput) (ret *AudioParserResultType, err error) { + repo := audio.NewFilenameParserRepository(r.repository) + parser := audio.NewFilenameParser(filter, config, repo) + + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + result, count, err := parser.Parse(ctx) + + if err != nil { + return err + } + + ret = &AudioParserResultType{ + Count: count, + Results: result, + } + + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *queryResolver) FindDuplicateAudios(ctx context.Context, distance *int, durationDiff *float64) (ret [][]*models.Audio, err error) { + dist := 0 + durDiff := -1. 
+ if distance != nil { + dist = *distance + } + if durationDiff != nil { + durDiff = *durationDiff + } + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Audio.FindDuplicates(ctx, dist, durDiff) + return err + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (r *queryResolver) AllAudios(ctx context.Context) (ret []*models.Audio, err error) { + if err := r.withReadTxn(ctx, func(ctx context.Context) error { + ret, err = r.repository.Audio.All(ctx) + return err + }); err != nil { + return nil, err + } + + return ret, nil +} diff --git a/internal/api/server.go b/internal/api/server.go index a7516da521..5703ea9846 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -159,12 +159,14 @@ func Initialize() (*Server, error) { pluginCache := mgr.PluginCache sceneService := mgr.SceneService + audioService := mgr.AudioService imageService := mgr.ImageService galleryService := mgr.GalleryService groupService := mgr.GroupService resolver := &Resolver{ repository: repo, sceneService: sceneService, + audioService: audioService, imageService: imageService, galleryService: galleryService, groupService: groupService, diff --git a/internal/api/urlbuilders/audio.go b/internal/api/urlbuilders/audio.go new file mode 100644 index 0000000000..1ac73e81ec --- /dev/null +++ b/internal/api/urlbuilders/audio.go @@ -0,0 +1,68 @@ +// TODO(audio): update this file + +package urlbuilders + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/stashapp/stash/pkg/models" +) + +type AudioURLBuilder struct { + BaseURL string + AudioID string + UpdatedAt string +} + +func NewAudioURLBuilder(baseURL string, audio *models.Audio) AudioURLBuilder { + return AudioURLBuilder{ + BaseURL: baseURL, + AudioID: strconv.Itoa(audio.ID), + UpdatedAt: strconv.FormatInt(audio.UpdatedAt.Unix(), 10), + } +} + +func (b AudioURLBuilder) GetStreamURL(apiKey string) *url.URL { + u, err := url.Parse(fmt.Sprintf("%s/audio/%s/stream", b.BaseURL, 
b.AudioID)) + if err != nil { + // shouldn't happen + panic(err) + } + + if apiKey != "" { + v := u.Query() + v.Set("apikey", apiKey) + u.RawQuery = v.Encode() + } + return u +} + +func (b AudioURLBuilder) GetStreamPreviewURL() string { + return b.BaseURL + "/audio/" + b.AudioID + "/preview" +} + +func (b AudioURLBuilder) GetStreamPreviewImageURL() string { + return b.BaseURL + "/audio/" + b.AudioID + "/webp" +} + +func (b AudioURLBuilder) GetSpriteVTTURL(checksum string) string { + return b.BaseURL + "/audio/" + checksum + "_thumbs.vtt" +} + +func (b AudioURLBuilder) GetSpriteURL(checksum string) string { + return b.BaseURL + "/audio/" + checksum + "_sprite.jpg" +} + +func (b AudioURLBuilder) GetScreenshotURL() string { + return b.BaseURL + "/audio/" + b.AudioID + "/screenshot?t=" + b.UpdatedAt +} + +func (b AudioURLBuilder) GetFunscriptURL() string { + return b.BaseURL + "/audio/" + b.AudioID + "/funscript" +} + +func (b AudioURLBuilder) GetCaptionURL() string { + return b.BaseURL + "/audio/" + b.AudioID + "/caption" +} diff --git a/internal/identify/identify.go b/internal/identify/identify.go index 6dc67dac3c..847f284e19 100644 --- a/internal/identify/identify.go +++ b/internal/identify/identify.go @@ -1,3 +1,4 @@ +// TODO(audio): update this file // Package identify provides the scene identification functionality for the application. // The identify functionality uses scene scrapers to identify a given scene and // set its metadata based on the scraped data. 
diff --git a/internal/manager/audio.go b/internal/manager/audio.go new file mode 100644 index 0000000000..637e6527e2 --- /dev/null +++ b/internal/manager/audio.go @@ -0,0 +1,213 @@ +// TODO(audio): update this file +package manager + +import ( + "fmt" + "net/url" + + "github.com/stashapp/stash/internal/manager/config" + "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/models" +) + +type AudioStreamEndpoint struct { + URL string `json:"url"` + MimeType *string `json:"mime_type"` + Label *string `json:"label"` +} + +var ( + // TODO(audio): figure out what stream types we need, and what we can support + directAudioEndpointType = endpointType{ + label: "Direct stream", + mimeType: ffmpeg.MimeMp4Audio, + extension: "", + } + mp3AudioEndpointType = endpointType{ + label: "MP3", + mimeType: ffmpeg.MimeMp3Audio, + extension: ".mp3", + } +) + +func GetAudioFileContainer(file *models.AudioFile) (ffmpeg.Container, error) { + var container ffmpeg.Container + format := file.Format + if format != "" { + container = ffmpeg.Container(format) + } else { // container isn't in the DB + // shouldn't happen, fallback to ffprobe + ffprobe := GetInstance().FFProbe + tmpAudioFile, err := ffprobe.NewAudioFile(file.Path) + if err != nil { + return ffmpeg.Container(""), fmt.Errorf("error reading video file: %v", err) + } + + return ffmpeg.MatchContainer(tmpAudioFile.Container, file.Path) + } + + return container, nil +} + +func GetAudioStreamPaths(audio *models.Audio, directStreamURL *url.URL, maxStreamingTranscodeSize models.StreamingResolutionEnum) ([]*AudioStreamEndpoint, error) { + if audio == nil { + return nil, fmt.Errorf("nil audio") + } + + pf := audio.Files.Primary() + if pf == nil { + return nil, nil + } + + // convert StreamingResolutionEnum to ResolutionEnum + maxStreamingResolution := models.ResolutionEnum(maxStreamingTranscodeSize) + audioResolution := models.GetMinResolution(pf) + includeAudioStreamPath := 
func(streamingResolution models.StreamingResolutionEnum) bool { + var minResolution int + if streamingResolution == models.StreamingResolutionEnumOriginal { + minResolution = audioResolution + } else { + // convert StreamingResolutionEnum to ResolutionEnum so we can get the min + // resolution + convertedRes := models.ResolutionEnum(streamingResolution) + minResolution = convertedRes.GetMinResolution() + + // don't include if audio resolution is smaller than the streamingResolution + if audioResolution != 0 && audioResolution < minResolution { + return false + } + } + + // if we always allow everything, then return true + if maxStreamingTranscodeSize == models.StreamingResolutionEnumOriginal { + return true + } + + return maxStreamingResolution.GetMinResolution() >= minResolution + } + + makeStreamEndpoint := func(t endpointType, resolution models.StreamingResolutionEnum) *AudioStreamEndpoint { + url := *directStreamURL + url.Path += t.extension + + label := t.label + + if resolution != "" { + v := url.Query() + v.Set("resolution", resolution.String()) + url.RawQuery = v.Encode() + + switch resolution { + case models.StreamingResolutionEnumFourK: + label += " 4K (2160p)" + case models.StreamingResolutionEnumFullHd: + label += " Full HD (1080p)" + case models.StreamingResolutionEnumStandardHd: + label += " HD (720p)" + case models.StreamingResolutionEnumStandard: + label += " Standard (480p)" + case models.StreamingResolutionEnumLow: + label += " Low (240p)" + } + } + + return &AudioStreamEndpoint{ + URL: url.String(), + MimeType: &t.mimeType, + Label: &label, + } + } + + var endpoints []*AudioStreamEndpoint + + // direct stream should only apply when the audio codec is supported + audioCodec := ffmpeg.MissingUnsupported + if pf.AudioCodec != "" { + audioCodec = ffmpeg.ProbeAudioCodec(pf.AudioCodec) + } + + // don't care if we can't get the container + container, _ := GetAudioFileContainer(pf) + + if HasAudioTranscode(audio, 
config.GetInstance().GetAudioFileNamingAlgorithm()) || ffmpeg.IsValidAudioForContainer(audioCodec, container) { + endpoints = append(endpoints, makeStreamEndpoint(directAudioEndpointType, "")) + } + + // only add mkv stream endpoint if the audio container is an mkv already + if container == ffmpeg.Matroska { + endpoints = append(endpoints, makeStreamEndpoint(mkvAudioEndpointType, "")) + } + + mp4Streams := []*AudioStreamEndpoint{} + webmStreams := []*AudioStreamEndpoint{} + hlsStreams := []*AudioStreamEndpoint{} + dashStreams := []*AudioStreamEndpoint{} + + if includeAudioStreamPath(models.StreamingResolutionEnumOriginal) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumOriginal)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumOriginal)) + hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumOriginal)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumOriginal)) + } + + if includeAudioStreamPath(models.StreamingResolutionEnumFourK) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumFourK)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumFourK)) + hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumFourK)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumFourK)) + } + + if includeAudioStreamPath(models.StreamingResolutionEnumFullHd) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumFullHd)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumFullHd)) + hlsStreams = append(hlsStreams, 
makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumFullHd)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumFullHd)) + } + + if includeAudioStreamPath(models.StreamingResolutionEnumStandardHd) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumStandardHd)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumStandardHd)) + hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumStandardHd)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumStandardHd)) + } + + if includeAudioStreamPath(models.StreamingResolutionEnumStandard) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumStandard)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumStandard)) + hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumStandard)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumStandard)) + } + + if includeAudioStreamPath(models.StreamingResolutionEnumLow) { + mp4Streams = append(mp4Streams, makeStreamEndpoint(mp3AudioEndpointType, models.StreamingResolutionEnumLow)) + webmStreams = append(webmStreams, makeStreamEndpoint(webmAudioEndpointType, models.StreamingResolutionEnumLow)) + hlsStreams = append(hlsStreams, makeStreamEndpoint(hlsAudioEndpointType, models.StreamingResolutionEnumLow)) + dashStreams = append(dashStreams, makeStreamEndpoint(dashAudioEndpointType, models.StreamingResolutionEnumLow)) + } + + endpoints = append(endpoints, mp4Streams...) + endpoints = append(endpoints, webmStreams...) + endpoints = append(endpoints, hlsStreams...) 
+ endpoints = append(endpoints, dashStreams...) + + return endpoints, nil +} + +// HasAudioTranscode returns true if a transcoded audio file exists for the +// provided audio. The transcode path checked is derived from the audio's hash +// as computed with the provided file naming algorithm. +func HasAudioTranscode(audio *models.Audio, fileNamingAlgo models.HashAlgorithm) bool { + if audio == nil { + return false + } + + audioHash := audio.GetHash(fileNamingAlgo) + if audioHash == "" { + return false + } + + transcodePath := instance.Paths.Audio.GetTranscodePath(audioHash) + ret, _ := fsutil.FileExists(transcodePath) + return ret +} diff --git a/internal/manager/config/config.go b/internal/manager/config/config.go index 19e2638109..aac3d4b685 100644 --- a/internal/manager/config/config.go +++ b/internal/manager/config/config.go @@ -836,6 +836,20 @@ func (i *Config) GetVideoFileNamingAlgorithm() models.HashAlgorithm { return models.HashAlgorithm(ret) } +// GetAudioFileNamingAlgorithm returns what hash algorithm should be used for +// naming generated audio files. +func (i *Config) GetAudioFileNamingAlgorithm() models.HashAlgorithm { + // TODO(audio): update this to AudioFileNamingAlgorithm? 
+ ret := i.getString(VideoFileNamingAlgorithm) + + // default to oshash + if ret == "" { + return models.HashAlgorithmOshash + } + + return models.HashAlgorithm(ret) +} + func (i *Config) GetSequentialScanning() bool { return i.getBool(SequentialScanning) } diff --git a/internal/manager/init.go b/internal/manager/init.go index b4af5eab78..65b2340afb 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -55,6 +55,14 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { Config: cfg, } + audioService := &audio.Service{ + File: db.File, + Repository: db.Audio, + PluginCache: pluginCache, + Paths: mgrPaths, + Config: cfg, + } + imageService := &image.Service{ File: db.File, Repository: db.Image, @@ -102,6 +110,7 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { Repository: repo, SceneService: sceneService, + AudioService: audioService, ImageService: imageService, GalleryService: galleryService, GroupService: groupService, diff --git a/internal/manager/manager.go b/internal/manager/manager.go index d3b91ec299..9571988cb0 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -64,6 +64,7 @@ type Manager struct { Repository models.Repository SceneService SceneService + AudioService AudioService ImageService ImageService GalleryService GalleryService GroupService GroupService diff --git a/internal/manager/manager_tasks.go b/internal/manager/manager_tasks.go index 76938e9fff..8c4f33194d 100644 --- a/internal/manager/manager_tasks.go +++ b/internal/manager/manager_tasks.go @@ -331,6 +331,7 @@ func (s *Manager) Clean(ctx context.Context, input CleanMetadataInput) int { j := cleanJob{ cleaner: cleaner, repository: s.Repository, + audioService: s.AudioService, sceneService: s.SceneService, imageService: s.ImageService, input: input, diff --git a/internal/manager/repository.go b/internal/manager/repository.go index 65514ed1da..73dd9605e0 100644 --- a/internal/manager/repository.go +++ 
b/internal/manager/repository.go @@ -7,6 +7,7 @@ import ( "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/scene" + "github.com/stashapp/stash/pkg/audio" ) type SceneService interface { @@ -19,6 +20,16 @@ type SceneService interface { sceneFingerprintGetter } +type AudioService interface { + Create(ctx context.Context, input models.CreateAudioInput) (*models.Audio, error) + AssignFile(ctx context.Context, audioID int, fileID models.FileID) error + Merge(ctx context.Context, sourceIDs []int, destinationID int, fileDeleter *audio.FileDeleter, options audio.MergeOptions) error + Destroy(ctx context.Context, audio *models.Audio, fileDeleter *audio.FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error + + FindByIDs(ctx context.Context, ids []int, load ...audio.LoadRelationshipOption) ([]*models.Audio, error) + audioFingerprintGetter +} + type ImageService interface { Destroy(ctx context.Context, image *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error DestroyZipImages(ctx context.Context, zipFile models.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error) diff --git a/internal/manager/running_streams.go b/internal/manager/running_streams.go index 18ac3b0426..07dd045d02 100644 --- a/internal/manager/running_streams.go +++ b/internal/manager/running_streams.go @@ -30,6 +30,19 @@ func KillRunningStreams(scene *models.Scene, fileNamingAlgo models.HashAlgorithm instance.ReadLockManager.Cancel(transcodePath) } +func KillRunningStreamsAudio(audio *models.Audio, fileNamingAlgo models.HashAlgorithm) { + instance.ReadLockManager.Cancel(audio.Path) + + audioHash := audio.GetHash(fileNamingAlgo) + + if audioHash == "" { + return + } + + transcodePath := GetInstance().Paths.Audio.GetTranscodePath(audioHash) + instance.ReadLockManager.Cancel(transcodePath) +} + type SceneCoverGetter interface { GetCover(ctx context.Context, 
sceneID int) ([]byte, error) } diff --git a/internal/manager/task_clean.go b/internal/manager/task_clean.go index 67b7038b64..2e71167348 100644 --- a/internal/manager/task_clean.go +++ b/internal/manager/task_clean.go @@ -27,6 +27,7 @@ type cleanJob struct { cleaner cleaner repository models.Repository input CleanMetadataInput + audioService AudioService sceneService SceneService imageService ImageService scanSubs *subscriptionManager diff --git a/internal/manager/task_scan.go b/internal/manager/task_scan.go index 155090cd22..24a1cd076e 100644 --- a/internal/manager/task_scan.go +++ b/internal/manager/task_scan.go @@ -1,3 +1,6 @@ +// TODO(audio): update this file to add Audio scanner, audioFileFilter, new file.FilteredHandler for audio.ScanHandler, +// TODO(audio): [con't] Add audio to extensionConfig, useAsAudio(), newExtensionConfig + package manager import ( @@ -18,6 +21,8 @@ import ( "github.com/stashapp/stash/pkg/file/video" "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/gallery" + // TODO(audio): uncomment + // "github.com/stashapp/stash/pkg/audio" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/job" "github.com/stashapp/stash/pkg/logger" @@ -681,6 +686,28 @@ func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progre Paths: instance.Paths, }, }, + // &file.FilteredHandler{ + // Filter: file.FilterFunc(audioFileFilter), + // Handler: &audio.ScanHandler{ + // CreatorUpdater: r.Audio, + // GalleryFinder: r.Gallery, + // SceneFinderUpdater: r.Scene, + // // ScanGenerator: &audioGenerators{ + // // input: options, + // // taskQueue: taskQueue, + // // progress: progress, + // // paths: mgr.Paths, + // // sequentialScanning: c.GetSequentialScanning(), + // // }, + // // ScanConfig: &scanConfig{ + // // isGenerateThumbnails: options.ScanGenerateThumbnails, + // // isGenerateClipPreviews: options.ScanGenerateClipPreviews, + // // createGalleriesFromFolders: c.GetCreateGalleriesFromFolders(), + // // }, 
+ // PluginCache: pluginCache, + // Paths: instance.Paths, + // }, + // }, &file.FilteredHandler{ Filter: file.FilterFunc(galleryFileFilter), Handler: &gallery.ScanHandler{ diff --git a/pkg/audio/create.go b/pkg/audio/create.go new file mode 100644 index 0000000000..bb1dcfeb17 --- /dev/null +++ b/pkg/audio/create.go @@ -0,0 +1,71 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/plugin/hook" +) + +func (s *Service) Create(ctx context.Context, input models.CreateAudioInput) (*models.Audio, error) { + // title must be set if no files are provided + if input.Audio.Title == "" && len(input.FileIDs) == 0 { + return nil, errors.New("title must be set if audio has no files") + } + + now := time.Now() + newAudio := *input.Audio + newAudio.CreatedAt = now + newAudio.UpdatedAt = now + + // don't pass the file ids since they may be already assigned + // assign them afterwards + if err := s.Repository.Create(ctx, &newAudio, nil); err != nil { + return nil, fmt.Errorf("creating new audio: %w", err) + } + + if len(input.CustomFields) > 0 { + if err := s.Repository.SetCustomFields(ctx, newAudio.ID, models.CustomFieldsInput{ + Full: input.CustomFields, + }); err != nil { + return nil, fmt.Errorf("setting custom fields on new audio: %w", err) + } + } + + for _, f := range input.FileIDs { + if err := s.AssignFile(ctx, newAudio.ID, f); err != nil { + return nil, fmt.Errorf("assigning file %d to new audio: %w", f, err) + } + } + + if len(input.FileIDs) > 0 { + // assign the primary to the first + if _, err := s.Repository.UpdatePartial(ctx, newAudio.ID, models.AudioPartial{ + PrimaryFileID: &input.FileIDs[0], + }); err != nil { + return nil, fmt.Errorf("setting primary file on new audio: %w", err) + } + } + + // re-find the audio so that it correctly returns file-related fields + ret, err := s.Repository.Find(ctx, newAudio.ID) + if err != nil { + return 
nil, err + } + + if len(input.CoverImage) > 0 { + if err := s.Repository.UpdateCover(ctx, ret.ID, input.CoverImage); err != nil { + return nil, fmt.Errorf("setting cover on new audio: %w", err) + } + } + + s.PluginCache.RegisterPostHooks(ctx, ret.ID, hook.AudioCreatePost, nil, nil) + + // re-find the audio so that it correctly returns file-related fields + return ret, nil +} diff --git a/pkg/audio/delete.go b/pkg/audio/delete.go new file mode 100644 index 0000000000..25c24994f3 --- /dev/null +++ b/pkg/audio/delete.go @@ -0,0 +1,195 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "path/filepath" + + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/file/video" + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/paths" +) + +// FileDeleter is an extension of file.Deleter that handles deletion of audio files. +type FileDeleter struct { + *file.Deleter + + FileNamingAlgo models.HashAlgorithm + Paths *paths.Paths +} + +// MarkGeneratedFiles marks for deletion the generated files for the provided audio. +// Generated files bypass trash and are permanently deleted since they can be regenerated. 
+func (d *FileDeleter) MarkGeneratedFiles(audio *models.Audio) error { + audioHash := audio.GetHash(d.FileNamingAlgo) + + if audioHash == "" { + return nil + } + + markersFolder := filepath.Join(d.Paths.Generated.Markers, audioHash) + + exists, _ := fsutil.FileExists(markersFolder) + if exists { + if err := d.DirsWithoutTrash([]string{markersFolder}); err != nil { + return err + } + } + + var files []string + + streamPreviewPath := d.Paths.Audio.GetVideoPreviewPath(audioHash) + exists, _ = fsutil.FileExists(streamPreviewPath) + if exists { + files = append(files, streamPreviewPath) + } + + streamPreviewImagePath := d.Paths.Audio.GetWebpPreviewPath(audioHash) + exists, _ = fsutil.FileExists(streamPreviewImagePath) + if exists { + files = append(files, streamPreviewImagePath) + } + + transcodePath := d.Paths.Audio.GetTranscodePath(audioHash) + exists, _ = fsutil.FileExists(transcodePath) + if exists { + files = append(files, transcodePath) + } + + spritePath := d.Paths.Audio.GetSpriteImageFilePath(audioHash) + exists, _ = fsutil.FileExists(spritePath) + if exists { + files = append(files, spritePath) + } + + vttPath := d.Paths.Audio.GetSpriteVttFilePath(audioHash) + exists, _ = fsutil.FileExists(vttPath) + if exists { + files = append(files, vttPath) + } + + return d.FilesWithoutTrash(files) +} + +// Destroy deletes a audio and its associated relationships from the +// database. 
+func (s *Service) Destroy(ctx context.Context, audio *models.Audio, fileDeleter *FileDeleter, deleteGenerated, deleteFile, destroyFileEntry bool) error { + mqb := s.MarkerRepository + markers, err := mqb.FindByAudioID(ctx, audio.ID) + if err != nil { + return err + } + + for _, m := range markers { + if err := DestroyMarker(ctx, audio, m, mqb, fileDeleter); err != nil { + return err + } + } + + if deleteFile { + if err := s.deleteFiles(ctx, audio, fileDeleter); err != nil { + return err + } + } else if destroyFileEntry { + if err := s.destroyFileEntries(ctx, audio); err != nil { + return err + } + } + + if deleteGenerated { + if err := fileDeleter.MarkGeneratedFiles(audio); err != nil { + return err + } + } + + if err := s.Repository.Destroy(ctx, audio.ID); err != nil { + return err + } + + return nil +} + +// deleteFiles deletes files from the database and file system +func (s *Service) deleteFiles(ctx context.Context, audio *models.Audio, fileDeleter *FileDeleter) error { + if err := audio.LoadFiles(ctx, s.Repository); err != nil { + return err + } + + for _, f := range audio.Files.List() { + // only delete files where there is no other associated audio + otherAudios, err := s.Repository.FindByFileID(ctx, f.ID) + if err != nil { + return err + } + + if len(otherAudios) > 1 { + // other audios associated, don't remove + continue + } + + const deleteFile = true + logger.Info("Deleting audio file: ", f.Path) + if err := file.Destroy(ctx, s.File, f, fileDeleter.Deleter, deleteFile); err != nil { + return err + } + + // don't delete files in zip archives + if f.ZipFileID == nil { + funscriptPath := video.GetFunscriptPath(f.Path) + funscriptExists, _ := fsutil.FileExists(funscriptPath) + if funscriptExists { + if err := fileDeleter.Files([]string{funscriptPath}); err != nil { + return err + } + } + } + } + + return nil +} + +// destroyFileEntries destroys file entries from the database without deleting +// the files from the filesystem +func (s *Service) 
destroyFileEntries(ctx context.Context, audio *models.Audio) error { + if err := audio.LoadFiles(ctx, s.Repository); err != nil { + return err + } + + for _, f := range audio.Files.List() { + // only destroy file entries where there is no other associated audio + otherAudios, err := s.Repository.FindByFileID(ctx, f.ID) + if err != nil { + return err + } + + if len(otherAudios) > 1 { + // other audios associated, don't remove + continue + } + + const deleteFile = false + logger.Info("Destroying audio file entry: ", f.Path) + if err := file.Destroy(ctx, s.File, f, nil, deleteFile); err != nil { + return err + } + } + + return nil +} + +// DestroyMarker deletes the audio marker from the database and returns a +// function that removes the generated files, to be executed after the +// transaction is successfully committed. +func DestroyMarker(ctx context.Context, audio *models.Audio, audioMarker *models.AudioMarker, qb models.AudioMarkerDestroyer, fileDeleter *FileDeleter) error { + if err := qb.Destroy(ctx, audioMarker.ID); err != nil { + return err + } + + // delete the preview for the marker + seconds := int(audioMarker.Seconds) + return fileDeleter.MarkMarkerFiles(audio, seconds) +} diff --git a/pkg/audio/export.go b/pkg/audio/export.go new file mode 100644 index 0000000000..3bc46eefc9 --- /dev/null +++ b/pkg/audio/export.go @@ -0,0 +1,268 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "fmt" + "math" + "strconv" + + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/json" + "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/sliceutil" + "github.com/stashapp/stash/pkg/utils" +) + +type ExportGetter interface { + models.ViewDateReader + models.ODateReader + models.CustomFieldsReader + GetCover(ctx context.Context, audioID int) ([]byte, error) +} + +type TagFinder interface { + models.TagGetter + FindByAudioID(ctx context.Context, 
audioID int) ([]*models.Tag, error) + FindByAudioMarkerID(ctx context.Context, audioMarkerID int) ([]*models.Tag, error) +} + +// ToBasicJSON converts a audio object into its JSON object equivalent. It +// does not convert the relationships to other objects, with the exception +// of cover image. +func ToBasicJSON(ctx context.Context, reader ExportGetter, audio *models.Audio) (*jsonschema.Audio, error) { + newAudioJSON := jsonschema.Audio{ + Title: audio.Title, + Code: audio.Code, + URLs: audio.URLs.List(), + Details: audio.Details, + CreatedAt: json.JSONTime{Time: audio.CreatedAt}, + UpdatedAt: json.JSONTime{Time: audio.UpdatedAt}, + } + + if audio.Date != nil { + newAudioJSON.Date = audio.Date.String() + } + + if audio.Rating != nil { + newAudioJSON.Rating = *audio.Rating + } + + newAudioJSON.Organized = audio.Organized + + for _, f := range audio.Files.List() { + newAudioJSON.Files = append(newAudioJSON.Files, f.Base().Path) + } + + cover, err := reader.GetCover(ctx, audio.ID) + if err != nil { + logger.Errorf("Error getting audio cover: %v", err) + } + + if len(cover) > 0 { + newAudioJSON.Cover = utils.GetBase64StringFromData(cover) + } + + dates, err := reader.GetViewDates(ctx, audio.ID) + if err != nil { + return nil, fmt.Errorf("error getting view dates: %v", err) + } + + for _, date := range dates { + newAudioJSON.PlayHistory = append(newAudioJSON.PlayHistory, json.JSONTime{Time: date}) + } + + odates, err := reader.GetODates(ctx, audio.ID) + if err != nil { + return nil, fmt.Errorf("error getting o dates: %v", err) + } + + for _, date := range odates { + newAudioJSON.OHistory = append(newAudioJSON.OHistory, json.JSONTime{Time: date}) + } + + newAudioJSON.CustomFields, err = reader.GetCustomFields(ctx, audio.ID) + if err != nil { + return nil, fmt.Errorf("getting audio custom fields: %v", err) + } + + return &newAudioJSON, nil +} + +// GetStudioName returns the name of the provided audio's studio. 
It returns an +// empty string if there is no studio assigned to the audio. +func GetStudioName(ctx context.Context, reader models.StudioGetter, audio *models.Audio) (string, error) { + if audio.StudioID != nil { + studio, err := reader.Find(ctx, *audio.StudioID) + if err != nil { + return "", err + } + + if studio != nil { + return studio.Name, nil + } + } + + return "", nil +} + +// GetTagNames returns a slice of tag names corresponding to the provided +// audio's tags. +func GetTagNames(ctx context.Context, reader TagFinder, audio *models.Audio) ([]string, error) { + tags, err := reader.FindByAudioID(ctx, audio.ID) + if err != nil { + return nil, fmt.Errorf("error getting audio tags: %v", err) + } + + return getTagNames(tags), nil +} + +func getTagNames(tags []*models.Tag) []string { + var results []string + for _, tag := range tags { + if tag.Name != "" { + results = append(results, tag.Name) + } + } + + return results +} + +// GetDependentTagIDs returns a slice of unique tag IDs that this audio references. +func GetDependentTagIDs(ctx context.Context, tags TagFinder, markerReader models.AudioMarkerFinder, audio *models.Audio) ([]int, error) { + var ret []int + + t, err := tags.FindByAudioID(ctx, audio.ID) + if err != nil { + return nil, err + } + + for _, tt := range t { + ret = sliceutil.AppendUnique(ret, tt.ID) + } + + sm, err := markerReader.FindByAudioID(ctx, audio.ID) + if err != nil { + return nil, err + } + + for _, smm := range sm { + ret = sliceutil.AppendUnique(ret, smm.PrimaryTagID) + smmt, err := tags.FindByAudioMarkerID(ctx, smm.ID) + if err != nil { + return nil, fmt.Errorf("invalid tags for audio marker: %v", err) + } + + for _, smmtt := range smmt { + ret = sliceutil.AppendUnique(ret, smmtt.ID) + } + } + + return ret, nil +} + +// GetAudioGroupsJSON returns a slice of AudioGroup JSON representation objects +// corresponding to the provided audio's audio group relationships. 
+func GetAudioGroupsJSON(ctx context.Context, groupReader models.GroupGetter, audio *models.Audio) ([]jsonschema.AudioGroup, error) { + audioGroups := audio.Groups.List() + + var results []jsonschema.AudioGroup + for _, audioGroup := range audioGroups { + group, err := groupReader.Find(ctx, audioGroup.GroupID) + if err != nil { + return nil, fmt.Errorf("error getting group: %v", err) + } + + if group != nil { + audioGroupJSON := jsonschema.AudioGroup{ + GroupName: group.Name, + } + if audioGroup.AudioIndex != nil { + audioGroupJSON.AudioIndex = *audioGroup.AudioIndex + } + results = append(results, audioGroupJSON) + } + } + + return results, nil +} + +// GetDependentGroupIDs returns a slice of group IDs that this audio references. +func GetDependentGroupIDs(ctx context.Context, audio *models.Audio) ([]int, error) { + var ret []int + + m := audio.Groups.List() + for _, mm := range m { + ret = append(ret, mm.GroupID) + } + + return ret, nil +} + +// GetAudioMarkersJSON returns a slice of AudioMarker JSON representation +// objects corresponding to the provided audio's markers. 
+func GetAudioMarkersJSON(ctx context.Context, markerReader models.AudioMarkerFinder, tagReader TagFinder, audio *models.Audio) ([]jsonschema.AudioMarker, error) { + audioMarkers, err := markerReader.FindByAudioID(ctx, audio.ID) + if err != nil { + return nil, fmt.Errorf("error getting audio markers: %v", err) + } + + var results []jsonschema.AudioMarker + + for _, audioMarker := range audioMarkers { + primaryTag, err := tagReader.Find(ctx, audioMarker.PrimaryTagID) + if err != nil { + return nil, fmt.Errorf("invalid primary tag for audio marker: %v", err) + } + + audioMarkerTags, err := tagReader.FindByAudioMarkerID(ctx, audioMarker.ID) + if err != nil { + return nil, fmt.Errorf("invalid tags for audio marker: %v", err) + } + + audioMarkerJSON := jsonschema.AudioMarker{ + Title: audioMarker.Title, + Seconds: getDecimalString(audioMarker.Seconds), + PrimaryTag: primaryTag.Name, + Tags: getTagNames(audioMarkerTags), + CreatedAt: json.JSONTime{Time: audioMarker.CreatedAt}, + UpdatedAt: json.JSONTime{Time: audioMarker.UpdatedAt}, + } + + if audioMarker.EndSeconds != nil { + audioMarkerJSON.EndSeconds = getDecimalString(*audioMarker.EndSeconds) + } + + results = append(results, audioMarkerJSON) + } + + return results, nil +} + +func getDecimalString(num float64) string { + if num == 0 { + return "" + } + + precision := getPrecision(num) + if precision == 0 { + precision = 1 + } + return fmt.Sprintf("%."+strconv.Itoa(precision)+"f", num) +} + +func getPrecision(num float64) int { + if num == 0 { + return 0 + } + + e := 1.0 + p := 0 + for (math.Round(num*e) / e) != num { + e *= 10 + p++ + } + return p +} diff --git a/pkg/audio/export_test.go b/pkg/audio/export_test.go new file mode 100644 index 0000000000..9646f88ced --- /dev/null +++ b/pkg/audio/export_test.go @@ -0,0 +1,449 @@ +// TODO(audio): update this file + +package audio + +import ( + "errors" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/json" + 
"github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "testing" + "time" +) + +const ( + audioID = 1 + noImageID = 2 + errImageID = 3 + + studioID = 4 + missingStudioID = 5 + errStudioID = 6 + customFieldsID = 7 + + noTagsID = 11 + errTagsID = 12 + + noGroupsID = 13 + errFindGroupID = 15 + + noMarkersID = 16 + errMarkersID = 17 + errFindPrimaryTagID = 18 + errFindByMarkerID = 19 + errCustomFieldsID = 20 +) + +var ( + url = "url" + title = "title" + date = "2001-01-01" + dateObj, _ = models.ParseDate(date) + rating = 5 + organized = true + details = "details" +) + +var ( + studioName = "studioName" + // galleryChecksum = "galleryChecksum" + + validGroup1 = 1 + validGroup2 = 2 + invalidGroup = 3 + + group1Name = "group1Name" + group2Name = "group2Name" + + group1Audio = 1 + group2Audio = 2 +) + +var names = []string{ + "name1", + "name2", +} + +var imageBytes = []byte("imageBytes") + +var stashID = models.StashID{ + StashID: "StashID", + Endpoint: "Endpoint", +} + +const ( + path = "path" + imageBase64 = "aW1hZ2VCeXRlcw==" +) + +var ( + createTime = time.Date(2001, 01, 01, 0, 0, 0, 0, time.UTC) + updateTime = time.Date(2002, 01, 01, 0, 0, 0, 0, time.UTC) +) + +var ( + emptyCustomFields = make(map[string]interface{}) + customFields = map[string]interface{}{ + "customField1": "customValue1", + } +) + +func createFullAudio(id int) models.Audio { + return models.Audio{ + ID: id, + Title: title, + Date: &dateObj, + Details: details, + Rating: &rating, + Organized: organized, + URLs: models.NewRelatedStrings([]string{url}), + Files: models.NewRelatedVideoFiles([]*models.VideoFile{ + { + BaseFile: &models.BaseFile{ + Path: path, + }, + }, + }), + CreatedAt: createTime, + UpdatedAt: updateTime, + } +} + +func createEmptyAudio(id int) models.Audio { + return models.Audio{ + ID: id, + Files: models.NewRelatedVideoFiles([]*models.VideoFile{ + { + BaseFile: 
&models.BaseFile{ + Path: path, + }, + }, + }), + URLs: models.NewRelatedStrings([]string{}), + CreatedAt: createTime, + UpdatedAt: updateTime, + } +} + +func createFullJSONAudio(image string, customFields map[string]interface{}) *jsonschema.Audio { + return &jsonschema.Audio{ + Title: title, + Files: []string{path}, + Date: date, + Details: details, + Rating: rating, + Organized: organized, + URLs: []string{url}, + CreatedAt: json.JSONTime{ + Time: createTime, + }, + UpdatedAt: json.JSONTime{ + Time: updateTime, + }, + Cover: image, + CustomFields: customFields, + } +} + +func createEmptyJSONAudio() *jsonschema.Audio { + return &jsonschema.Audio{ + URLs: []string{}, + Files: []string{path}, + CreatedAt: json.JSONTime{ + Time: createTime, + }, + UpdatedAt: json.JSONTime{ + Time: updateTime, + }, + CustomFields: emptyCustomFields, + } +} + +type basicTestScenario struct { + input models.Audio + customFields map[string]interface{} + expected *jsonschema.Audio + err bool +} + +var scenarios = []basicTestScenario{ + { + createFullAudio(audioID), + emptyCustomFields, + createFullJSONAudio(imageBase64, emptyCustomFields), + false, + }, + { + createFullAudio(customFieldsID), + customFields, + createFullJSONAudio("", customFields), + false, + }, + { + createEmptyAudio(noImageID), + emptyCustomFields, + createEmptyJSONAudio(), + false, + }, + { + createFullAudio(errImageID), + emptyCustomFields, + createFullJSONAudio("", emptyCustomFields), + // failure to get image should not cause an error + false, + }, + { + createFullAudio(errCustomFieldsID), + customFields, + createFullJSONAudio("", customFields), + true, + }, +} + +func TestToJSON(t *testing.T) { + db := mocks.NewDatabase() + + imageErr := errors.New("error getting image") + + db.Audio.On("GetCover", testCtx, audioID).Return(imageBytes, nil).Once() + db.Audio.On("GetCover", testCtx, noImageID).Return(nil, nil).Once() + db.Audio.On("GetCover", testCtx, errImageID).Return(nil, imageErr).Once() + db.Audio.On("GetCover", 
testCtx, mock.Anything).Return(nil, nil) + db.Audio.On("GetViewDates", testCtx, mock.Anything).Return(nil, nil) + db.Audio.On("GetODates", testCtx, mock.Anything).Return(nil, nil) + db.Audio.On("GetCustomFields", testCtx, customFieldsID).Return(customFields, nil).Once() + db.Audio.On("GetCustomFields", testCtx, errCustomFieldsID).Return(nil, errors.New("error getting custom fields")).Once() + db.Audio.On("GetCustomFields", testCtx, mock.Anything).Return(emptyCustomFields, nil) + + for i, s := range scenarios { + audio := s.input + json, err := ToBasicJSON(testCtx, db.Audio, &audio) + + switch { + case !s.err && err != nil: + t.Errorf("[%d] unexpected error: %s", i, err.Error()) + case s.err && err == nil: + t.Errorf("[%d] expected error not returned", i) + case err != nil: + // error case already handled, no need for assertion + default: + assert.Equal(t, s.expected, json, "[%d]", i) + } + } + + db.AssertExpectations(t) +} + +func createStudioAudio(studioID int) models.Audio { + return models.Audio{ + StudioID: &studioID, + } +} + +type stringTestScenario struct { + input models.Audio + expected string + err bool +} + +var getStudioScenarios = []stringTestScenario{ + { + createStudioAudio(studioID), + studioName, + false, + }, + { + createStudioAudio(missingStudioID), + "", + false, + }, + { + createStudioAudio(errStudioID), + "", + true, + }, +} + +func TestGetStudioName(t *testing.T) { + db := mocks.NewDatabase() + + studioErr := errors.New("error getting image") + + db.Studio.On("Find", testCtx, studioID).Return(&models.Studio{ + Name: studioName, + }, nil).Once() + db.Studio.On("Find", testCtx, missingStudioID).Return(nil, nil).Once() + db.Studio.On("Find", testCtx, errStudioID).Return(nil, studioErr).Once() + + for i, s := range getStudioScenarios { + audio := s.input + json, err := GetStudioName(testCtx, db.Studio, &audio) + + switch { + case !s.err && err != nil: + t.Errorf("[%d] unexpected error: %s", i, err.Error()) + case s.err && err == nil: + 
t.Errorf("[%d] expected error not returned", i) + default: + assert.Equal(t, s.expected, json, "[%d]", i) + } + } + + db.AssertExpectations(t) +} + +type stringSliceTestScenario struct { + input models.Audio + expected []string + err bool +} + +var getTagNamesScenarios = []stringSliceTestScenario{ + { + createEmptyAudio(audioID), + names, + false, + }, + { + createEmptyAudio(noTagsID), + nil, + false, + }, + { + createEmptyAudio(errTagsID), + nil, + true, + }, +} + +func getTags(names []string) []*models.Tag { + var ret []*models.Tag + for _, n := range names { + ret = append(ret, &models.Tag{ + Name: n, + }) + } + + return ret +} + +func TestGetTagNames(t *testing.T) { + db := mocks.NewDatabase() + + tagErr := errors.New("error getting tag") + + db.Tag.On("FindByAudioID", testCtx, audioID).Return(getTags(names), nil).Once() + db.Tag.On("FindByAudioID", testCtx, noTagsID).Return(nil, nil).Once() + db.Tag.On("FindByAudioID", testCtx, errTagsID).Return(nil, tagErr).Once() + + for i, s := range getTagNamesScenarios { + audio := s.input + json, err := GetTagNames(testCtx, db.Tag, &audio) + + switch { + case !s.err && err != nil: + t.Errorf("[%d] unexpected error: %s", i, err.Error()) + case s.err && err == nil: + t.Errorf("[%d] expected error not returned", i) + default: + assert.Equal(t, s.expected, json, "[%d]", i) + } + } + + db.AssertExpectations(t) +} + +type audioGroupsTestScenario struct { + input models.Audio + expected []jsonschema.AudioGroup + err bool +} + +var validGroups = models.NewRelatedGroups([]models.GroupsAudios{ + { + GroupID: validGroup1, + AudioIndex: &group1Audio, + }, + { + GroupID: validGroup2, + AudioIndex: &group2Audio, + }, +}) + +var invalidGroups = models.NewRelatedGroups([]models.GroupsAudios{ + { + GroupID: invalidGroup, + AudioIndex: &group1Audio, + }, +}) + +var getAudioGroupsJSONScenarios = []audioGroupsTestScenario{ + { + models.Audio{ + ID: audioID, + Groups: validGroups, + }, + []jsonschema.AudioGroup{ + { + GroupName: group1Name, 
+ AudioIndex: group1Audio, + }, + { + GroupName: group2Name, + AudioIndex: group2Audio, + }, + }, + false, + }, + { + models.Audio{ + ID: noGroupsID, + Groups: models.NewRelatedGroups([]models.GroupsAudios{}), + }, + nil, + false, + }, + { + models.Audio{ + ID: errFindGroupID, + Groups: invalidGroups, + }, + nil, + true, + }, +} + +func TestGetAudioGroupsJSON(t *testing.T) { + db := mocks.NewDatabase() + + groupErr := errors.New("error getting group") + + db.Group.On("Find", testCtx, validGroup1).Return(&models.Group{ + Name: group1Name, + }, nil).Once() + db.Group.On("Find", testCtx, validGroup2).Return(&models.Group{ + Name: group2Name, + }, nil).Once() + db.Group.On("Find", testCtx, invalidGroup).Return(nil, groupErr).Once() + + for i, s := range getAudioGroupsJSONScenarios { + audio := s.input + json, err := GetAudioGroupsJSON(testCtx, db.Group, &audio) + + switch { + case !s.err && err != nil: + t.Errorf("[%d] unexpected error: %s", i, err.Error()) + case s.err && err == nil: + t.Errorf("[%d] expected error not returned", i) + default: + assert.Equal(t, s.expected, json, "[%d]", i) + } + } + + db.AssertExpectations(t) +} diff --git a/pkg/audio/filename_parser.go b/pkg/audio/filename_parser.go new file mode 100644 index 0000000000..cf0e0be22f --- /dev/null +++ b/pkg/audio/filename_parser.go @@ -0,0 +1,722 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/stashapp/stash/pkg/studio" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/tag" +) + +type parserField struct { + field string + fieldRegex *regexp.Regexp + regex string + isFullDateField bool + isCaptured bool +} + +func newParserField(field string, regex string, captured bool) parserField { + ret := parserField{ + field: field, + isFullDateField: false, + isCaptured: captured, + } + + ret.fieldRegex, _ = regexp.Compile(`\{` + ret.field + `\}`) + + regexStr := regex + 
+ if captured { + regexStr = "(" + regexStr + ")" + } + ret.regex = regexStr + + return ret +} + +func newFullDateParserField(field string, regex string) parserField { + ret := newParserField(field, regex, true) + ret.isFullDateField = true + return ret +} + +func (f parserField) replaceInPattern(pattern string) string { + return string(f.fieldRegex.ReplaceAllString(pattern, f.regex)) +} + +var validFields map[string]parserField +var escapeCharRE *regexp.Regexp +var capitalizeTitleRE *regexp.Regexp +var multiWSRE *regexp.Regexp +var delimiterRE *regexp.Regexp + +func compileREs() { + const escapeCharPattern = `([\-\.\(\)\[\]])` + escapeCharRE = regexp.MustCompile(escapeCharPattern) + + const capitaliseTitlePattern = `(?:^| )\w` + capitalizeTitleRE = regexp.MustCompile(capitaliseTitlePattern) + + const multiWSPattern = ` {2,}` + multiWSRE = regexp.MustCompile(multiWSPattern) + + const delimiterPattern = `(?:\.|-|_)` + delimiterRE = regexp.MustCompile(delimiterPattern) +} + +func initParserFields() { + if validFields != nil { + return + } + + ret := make(map[string]parserField) + + ret["title"] = newParserField("title", ".*", true) + ret["ext"] = newParserField("ext", ".*$", false) + + ret["d"] = newParserField("d", `(?:\.|-|_)`, false) + ret["rating"] = newParserField("rating", `\d`, true) + ret["rating100"] = newParserField("rating100", `\d`, true) + ret["performer"] = newParserField("performer", ".*", true) + ret["studio"] = newParserField("studio", ".*", true) + ret["movie"] = newParserField("movie", ".*", true) + ret["tag"] = newParserField("tag", ".*", true) + + // date fields + ret["date"] = newParserField("date", `\d{4}-\d{2}-\d{2}`, true) + ret["yyyy"] = newParserField("yyyy", `\d{4}`, true) + ret["yy"] = newParserField("yy", `\d{2}`, true) + ret["mm"] = newParserField("mm", `\d{2}`, true) + ret["mmm"] = newParserField("mmm", `\w{3}`, true) + ret["dd"] = newParserField("dd", `\d{2}`, true) + ret["yyyymmdd"] = newFullDateParserField("yyyymmdd", `\d{8}`) + 
ret["yymmdd"] = newFullDateParserField("yymmdd", `\d{6}`) + ret["ddmmyyyy"] = newFullDateParserField("ddmmyyyy", `\d{8}`) + ret["ddmmyy"] = newFullDateParserField("ddmmyy", `\d{6}`) + ret["mmddyyyy"] = newFullDateParserField("mmddyyyy", `\d{8}`) + ret["mmddyy"] = newFullDateParserField("mmddyy", `\d{6}`) + + validFields = ret +} + +func replacePatternWithRegex(pattern string, ignoreWords []string) string { + initParserFields() + + for _, field := range validFields { + pattern = field.replaceInPattern(pattern) + } + + ignoreClause := getIgnoreClause(ignoreWords) + ignoreField := newParserField("i", ignoreClause, false) + pattern = ignoreField.replaceInPattern(pattern) + + return pattern +} + +type parseMapper struct { + fields []string + regexString string + regex *regexp.Regexp +} + +func getIgnoreClause(ignoreFields []string) string { + if len(ignoreFields) == 0 { + return "" + } + + var ignoreClauses []string + + for _, v := range ignoreFields { + newVal := string(escapeCharRE.ReplaceAllString(v, `\$1`)) + newVal = strings.TrimSpace(newVal) + newVal = "(?:" + newVal + ")" + ignoreClauses = append(ignoreClauses, newVal) + } + + return "(?:" + strings.Join(ignoreClauses, "|") + ")" +} + +func newParseMapper(pattern string, ignoreFields []string) (*parseMapper, error) { + ret := &parseMapper{} + + // escape control characters + regex := escapeCharRE.ReplaceAllString(pattern, `\$1`) + + // replace {} with wildcard + braceRE := regexp.MustCompile(`\{\}`) + regex = braceRE.ReplaceAllString(regex, ".*") + + // replace all known fields with applicable regexes + regex = replacePatternWithRegex(regex, ignoreFields) + + ret.regexString = regex + + // make case insensitive + regex = "(?i)" + regex + + var err error + + ret.regex, err = regexp.Compile(regex) + + if err != nil { + return nil, err + } + + // find invalid fields + invalidRE := regexp.MustCompile(`\{[A-Za-z]+\}`) + foundInvalid := invalidRE.FindAllString(regex, -1) + if len(foundInvalid) > 0 { + return nil, 
errors.New("Invalid fields: " + strings.Join(foundInvalid, ", ")) + } + + fieldExtractor := regexp.MustCompile(`\{([A-Za-z]+)\}`) + + result := fieldExtractor.FindAllStringSubmatch(pattern, -1) + + var fields []string + for _, v := range result { + field := v[1] + + // only add to fields if it is captured + parserField, found := validFields[field] + if found && parserField.isCaptured { + fields = append(fields, field) + } + } + + ret.fields = fields + + return ret, nil +} + +type audioHolder struct { + audio *models.Audio + result *models.Audio + yyyy string + mm string + dd string + performers []string + groups []string + studio string + tags []string +} + +func newAudioHolder(audio *models.Audio) *audioHolder { + audioCopy := models.Audio{ + ID: audio.ID, + Files: audio.Files, + // Checksum: audio.Checksum, + // Path: audio.Path, + } + ret := audioHolder{ + audio: audio, + result: &audioCopy, + } + + return &ret +} + +func validateRating(rating int) bool { + return rating >= 1 && rating <= 5 +} + +func validateRating100(rating100 int) bool { + return rating100 >= 1 && rating100 <= 100 +} + +// returns nil if invalid +func parseDate(dateStr string) *models.Date { + splits := strings.Split(dateStr, "-") + if len(splits) != 3 { + return nil + } + + year, _ := strconv.Atoi(splits[0]) + month, _ := strconv.Atoi(splits[1]) + d, _ := strconv.Atoi(splits[2]) + + // assume year must be between 1900 and 2100 + if year < 1900 || year > 2100 { + return nil + } + + if month < 1 || month > 12 { + return nil + } + + // not checking individual months to ensure date is in the correct range + if d < 1 || d > 31 { + return nil + } + + ret, err := models.ParseDate(dateStr) + if err != nil { + return nil + } + return &ret +} + +func (h *audioHolder) setDate(field *parserField, value string) { + yearIndex := 0 + yearLength := len(strings.Split(field.field, "y")) - 1 + dateIndex := 0 + monthIndex := 0 + + switch field.field { + case "yyyymmdd", "yymmdd": + monthIndex = yearLength + 
dateIndex = monthIndex + 2 + case "ddmmyyyy", "ddmmyy": + monthIndex = 2 + yearIndex = monthIndex + 2 + case "mmddyyyy", "mmddyy": + dateIndex = monthIndex + 2 + yearIndex = dateIndex + 2 + } + + yearValue := value[yearIndex : yearIndex+yearLength] + monthValue := value[monthIndex : monthIndex+2] + dateValue := value[dateIndex : dateIndex+2] + + fullDate := yearValue + "-" + monthValue + "-" + dateValue + + // ensure the date is valid + // only set if new value is different from the old + newDate := parseDate(fullDate) + if newDate != nil && h.audio.Date != nil && *h.audio.Date != *newDate { + h.result.Date = newDate + } +} + +func mmmToMonth(mmm string) string { + format := "02-Jan-2006" + dateStr := "01-" + mmm + "-2000" + t, err := time.Parse(format, dateStr) + + if err != nil { + return "" + } + + // expect month in two-digit format + format = "01-02-2006" + return t.Format(format)[0:2] +} + +func (h *audioHolder) setField(field parserField, value interface{}) { + if field.isFullDateField { + h.setDate(&field, value.(string)) + return + } + + switch field.field { + case "title": + v := value.(string) + h.result.Title = v + case "date": + h.result.Date = parseDate(value.(string)) + case "rating": + rating, _ := strconv.Atoi(value.(string)) + if validateRating(rating) { + // convert to 1-100 scale + rating = models.Rating5To100(rating) + h.result.Rating = &rating + } + case "rating100": + rating, _ := strconv.Atoi(value.(string)) + if validateRating100(rating) { + h.result.Rating = &rating + } + case "performer": + // add performer to list + h.performers = append(h.performers, value.(string)) + case "studio": + h.studio = value.(string) + case "movie": + h.groups = append(h.groups, value.(string)) + case "tag": + h.tags = append(h.tags, value.(string)) + case "yyyy": + h.yyyy = value.(string) + case "yy": + v := value.(string) + v = "20" + v + h.yyyy = v + case "mmm": + h.mm = mmmToMonth(value.(string)) + case "mm": + h.mm = value.(string) + case "dd": + h.dd = 
value.(string) + } +} + +func (h *audioHolder) postParse() { + // set the date if the components are set + if h.yyyy != "" && h.mm != "" && h.dd != "" { + fullDate := h.yyyy + "-" + h.mm + "-" + h.dd + h.setField(validFields["date"], fullDate) + } +} + +func (m parseMapper) parse(audio *models.Audio) *audioHolder { + + // #302 - if the pattern includes a path separator, then include the entire + // audio path in the match. Otherwise, use the default behaviour of just + // the file's basename + // must be double \ because of the regex escaping + filename := filepath.Base(audio.Path) + if strings.Contains(m.regexString, `\\`) || strings.Contains(m.regexString, "/") { + filename = audio.Path + } + + result := m.regex.FindStringSubmatch(filename) + + if len(result) == 0 { + return nil + } + + initParserFields() + + audioHolder := newAudioHolder(audio) + + for index, match := range result { + if index == 0 { + // skip entire match + continue + } + + field := m.fields[index-1] + parserField, found := validFields[field] + if found { + audioHolder.setField(parserField, match) + } + } + + audioHolder.postParse() + + return audioHolder +} + +type FilenameParser struct { + Pattern string + ParserInput models.AudioParserInput + Filter *models.FindFilterType + whitespaceRE *regexp.Regexp + repository FilenameParserRepository + performerCache map[string]*models.Performer + studioCache map[string]*models.Studio + groupCache map[string]*models.Group + tagCache map[string]*models.Tag +} + +func NewFilenameParser(filter *models.FindFilterType, config models.AudioParserInput, repo FilenameParserRepository) *FilenameParser { + p := &FilenameParser{ + Pattern: *filter.Q, + ParserInput: config, + Filter: filter, + repository: repo, + } + + p.performerCache = make(map[string]*models.Performer) + p.studioCache = make(map[string]*models.Studio) + p.groupCache = make(map[string]*models.Group) + p.tagCache = make(map[string]*models.Tag) + + p.initWhiteSpaceRegex() + + return p +} + +func (p 
*FilenameParser) initWhiteSpaceRegex() { + compileREs() + + wsChars := "" + if p.ParserInput.WhitespaceCharacters != nil { + wsChars = *p.ParserInput.WhitespaceCharacters + wsChars = strings.TrimSpace(wsChars) + } + + if len(wsChars) > 0 { + wsRegExp := escapeCharRE.ReplaceAllString(wsChars, `\$1`) + wsRegExp = "[" + wsRegExp + "]" + p.whitespaceRE = regexp.MustCompile(wsRegExp) + } +} + +type FilenameParserRepository struct { + Audio models.AudioQueryer + Performer PerformerNamesFinder + Studio models.StudioQueryer + Group GroupNameFinder + Tag models.TagNameFinder +} + +func NewFilenameParserRepository(repo models.Repository) FilenameParserRepository { + return FilenameParserRepository{ + Audio: repo.Audio, + Performer: repo.Performer, + Studio: repo.Studio, + Group: repo.Group, + Tag: repo.Tag, + } +} + +func (p *FilenameParser) Parse(ctx context.Context) ([]*models.AudioParserResult, int, error) { + // perform the query to find the audios + mapper, err := newParseMapper(p.Pattern, p.ParserInput.IgnoreWords) + + if err != nil { + return nil, 0, err + } + + audioFilter := &models.AudioFilterType{ + Path: &models.StringCriterionInput{ + Modifier: models.CriterionModifierMatchesRegex, + Value: "(?i)" + mapper.regexString, + }, + } + + if p.ParserInput.IgnoreOrganized != nil && *p.ParserInput.IgnoreOrganized { + organized := false + audioFilter.Organized = &organized + } + + p.Filter.Q = nil + + audios, total, err := QueryWithCount(ctx, p.repository.Audio, audioFilter, p.Filter) + if err != nil { + return nil, 0, err + } + + ret := p.parseAudios(ctx, audios, mapper) + + return ret, total, nil +} + +func (p *FilenameParser) parseAudios(ctx context.Context, audios []*models.Audio, mapper *parseMapper) []*models.AudioParserResult { + var ret []*models.AudioParserResult + for _, audio := range audios { + audioHolder := mapper.parse(audio) + + if audioHolder != nil { + r := &models.AudioParserResult{ + Audio: audio, + } + p.setParserResult(ctx, *audioHolder, r) + + ret = 
append(ret, r) + } + } + + return ret +} + +func (p FilenameParser) replaceWhitespaceCharacters(value string) string { + if p.whitespaceRE != nil { + value = p.whitespaceRE.ReplaceAllString(value, " ") + // remove consecutive spaces + value = multiWSRE.ReplaceAllString(value, " ") + } + + return value +} + +type PerformerNamesFinder interface { + FindByNames(ctx context.Context, names []string, nocase bool) ([]*models.Performer, error) +} + +func (p *FilenameParser) queryPerformer(ctx context.Context, qb PerformerNamesFinder, performerName string) *models.Performer { + // massage the performer name + performerName = delimiterRE.ReplaceAllString(performerName, " ") + + // check cache first + if ret, found := p.performerCache[performerName]; found { + return ret + } + + // perform an exact match and grab the first + performers, _ := qb.FindByNames(ctx, []string{performerName}, true) + + var ret *models.Performer + if len(performers) > 0 { + ret = performers[0] + } + + // add result to cache + p.performerCache[performerName] = ret + + return ret +} + +func (p *FilenameParser) queryStudio(ctx context.Context, qb models.StudioQueryer, studioName string) *models.Studio { + // massage the performer name + studioName = delimiterRE.ReplaceAllString(studioName, " ") + + // check cache first + if ret, found := p.studioCache[studioName]; found { + return ret + } + + ret, _ := studio.ByName(ctx, qb, studioName) + + // try to match on alias + if ret == nil { + ret, _ = studio.ByAlias(ctx, qb, studioName) + } + + // add result to cache + p.studioCache[studioName] = ret + + return ret +} + +type GroupNameFinder interface { + FindByName(ctx context.Context, name string, nocase bool) (*models.Group, error) +} + +func (p *FilenameParser) queryGroup(ctx context.Context, qb GroupNameFinder, groupName string) *models.Group { + // massage the group name + groupName = delimiterRE.ReplaceAllString(groupName, " ") + + // check cache first + if ret, found := p.groupCache[groupName]; found { 
+ return ret + } + + ret, _ := qb.FindByName(ctx, groupName, true) + + // add result to cache + p.groupCache[groupName] = ret + + return ret +} + +func (p *FilenameParser) queryTag(ctx context.Context, qb models.TagNameFinder, tagName string) *models.Tag { + // massage the tag name + tagName = delimiterRE.ReplaceAllString(tagName, " ") + + // check cache first + if ret, found := p.tagCache[tagName]; found { + return ret + } + + // match tag name exactly + ret, _ := tag.ByName(ctx, qb, tagName) + + // try to match on alias + if ret == nil { + ret, _ = tag.ByAlias(ctx, qb, tagName) + } + + // add result to cache + p.tagCache[tagName] = ret + + return ret +} + +func (p *FilenameParser) setPerformers(ctx context.Context, qb PerformerNamesFinder, h audioHolder, result *models.AudioParserResult) { + // query for each performer + performersSet := make(map[int]bool) + for _, performerName := range h.performers { + if performerName != "" { + performer := p.queryPerformer(ctx, qb, performerName) + if performer != nil { + if _, found := performersSet[performer.ID]; !found { + result.PerformerIds = append(result.PerformerIds, strconv.Itoa(performer.ID)) + performersSet[performer.ID] = true + } + } + } + } +} + +func (p *FilenameParser) setTags(ctx context.Context, qb models.TagNameFinder, h audioHolder, result *models.AudioParserResult) { + // query for each performer + tagsSet := make(map[int]bool) + for _, tagName := range h.tags { + if tagName != "" { + tag := p.queryTag(ctx, qb, tagName) + if tag != nil { + if _, found := tagsSet[tag.ID]; !found { + result.TagIds = append(result.TagIds, strconv.Itoa(tag.ID)) + tagsSet[tag.ID] = true + } + } + } + } +} + +func (p *FilenameParser) setStudio(ctx context.Context, qb models.StudioQueryer, h audioHolder, result *models.AudioParserResult) { + // query for each performer + if h.studio != "" { + studio := p.queryStudio(ctx, qb, h.studio) + if studio != nil { + studioID := strconv.Itoa(studio.ID) + result.StudioID = &studioID + } + 
} +} + +func (p *FilenameParser) setGroups(ctx context.Context, qb GroupNameFinder, h audioHolder, result *models.AudioParserResult) { + // query for each group + groupsSet := make(map[int]bool) + for _, groupName := range h.groups { + if groupName != "" { + group := p.queryGroup(ctx, qb, groupName) + if group != nil { + if _, found := groupsSet[group.ID]; !found { + result.Groups = append(result.Groups, &models.AudioGroupID{ + GroupID: strconv.Itoa(group.ID), + }) + groupsSet[group.ID] = true + } + } + } + } +} + +func (p *FilenameParser) setParserResult(ctx context.Context, h audioHolder, result *models.AudioParserResult) { + if h.result.Title != "" { + title := h.result.Title + title = p.replaceWhitespaceCharacters(title) + + if p.ParserInput.CapitalizeTitle != nil && *p.ParserInput.CapitalizeTitle { + title = capitalizeTitleRE.ReplaceAllStringFunc(title, strings.ToUpper) + } + + result.Title = &title + } + + if h.result.Date != nil { + dateStr := h.result.Date.String() + result.Date = &dateStr + } + + if h.result.Rating != nil { + result.Rating = h.result.Rating + } + + r := p.repository + + if len(h.performers) > 0 { + p.setPerformers(ctx, r.Performer, h, result) + } + if len(h.tags) > 0 { + p.setTags(ctx, r.Tag, h, result) + } + p.setStudio(ctx, r.Studio, h, result) + + if len(h.groups) > 0 { + p.setGroups(ctx, r.Group, h, result) + } +} diff --git a/pkg/audio/filter.go b/pkg/audio/filter.go new file mode 100644 index 0000000000..4e2ae5e0fd --- /dev/null +++ b/pkg/audio/filter.go @@ -0,0 +1,42 @@ +// TODO(audio): update this file + +package audio + +import ( + "path/filepath" + "strings" + + "github.com/stashapp/stash/pkg/models" +) + +func PathsFilter(paths []string) *models.AudioFilterType { + if paths == nil { + return nil + } + + sep := string(filepath.Separator) + + var ret *models.AudioFilterType + var or *models.AudioFilterType + for _, p := range paths { + newOr := &models.AudioFilterType{} + if or != nil { + or.Or = newOr + } else { + ret = newOr + } 
+ + or = newOr + + if !strings.HasSuffix(p, sep) { + p += sep + } + + or.Path = &models.StringCriterionInput{ + Modifier: models.CriterionModifierEquals, + Value: p + "%", + } + } + + return ret +} diff --git a/pkg/audio/find.go b/pkg/audio/find.go new file mode 100644 index 0000000000..aeb02ed3a0 --- /dev/null +++ b/pkg/audio/find.go @@ -0,0 +1,84 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "fmt" + + "github.com/stashapp/stash/pkg/models" +) + +type LoadRelationshipOption func(context.Context, *models.Audio, models.AudioReader) error + +func LoadURLs(ctx context.Context, audio *models.Audio, r models.AudioReader) error { + if err := audio.LoadURLs(ctx, r); err != nil { + return fmt.Errorf("loading audio URLs: %w", err) + } + + return nil +} + +func LoadFiles(ctx context.Context, audio *models.Audio, r models.AudioReader) error { + if err := audio.LoadFiles(ctx, r); err != nil { + return fmt.Errorf("failed to load files for audio %d: %w", audio.ID, err) + } + + return nil +} + +// FindByIDs retrieves multiple audios by their IDs. +// Missing audios will be ignored, and the returned audios are unsorted. +// This method will load the specified relationships for each audio. +func (s *Service) FindByIDs(ctx context.Context, ids []int, load ...LoadRelationshipOption) ([]*models.Audio, error) { + var audios []*models.Audio + qb := s.Repository + + var err error + audios, err = qb.FindByIDs(ctx, ids) + if err != nil { + return nil, err + } + + // TODO - we should bulk load these relationships + for _, audio := range audios { + if err := s.LoadRelationships(ctx, audio, load...); err != nil { + return nil, err + } + } + + return audios, nil +} + +// FindMany retrieves multiple audios by their IDs. Return value is guaranteed to be in the same order as the input. +// Missing audios will return an error. +// This method will load the specified relationships for each audio. 
+func (s *Service) FindMany(ctx context.Context, ids []int, load ...LoadRelationshipOption) ([]*models.Audio, error) { + var audios []*models.Audio + qb := s.Repository + + var err error + audios, err = qb.FindMany(ctx, ids) + if err != nil { + return nil, err + } + + // TODO - we should bulk load these relationships + for _, audio := range audios { + if err := s.LoadRelationships(ctx, audio, load...); err != nil { + return nil, err + } + } + + return audios, nil +} + +func (s *Service) LoadRelationships(ctx context.Context, audio *models.Audio, load ...LoadRelationshipOption) error { + for _, l := range load { + if err := l(ctx, audio, s.Repository); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/audio/fingerprints.go b/pkg/audio/fingerprints.go new file mode 100644 index 0000000000..8c9c6be80b --- /dev/null +++ b/pkg/audio/fingerprints.go @@ -0,0 +1,42 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "fmt" + + "github.com/stashapp/stash/pkg/models" +) + +// GetFingerprints returns the fingerprints for the given audio ids. +func (s *Service) GetAudiosFingerprints(ctx context.Context, ids []int) ([]models.Fingerprints, error) { + fingerprints := make([]models.Fingerprints, len(ids)) + + qb := s.Repository + + for i, audioID := range ids { + audio, err := qb.Find(ctx, audioID) + if err != nil { + return nil, err + } + + if audio == nil { + return nil, fmt.Errorf("audio with id %d not found", audioID) + } + + if err := audio.LoadFiles(ctx, qb); err != nil { + return nil, err + } + + var audioFPs models.Fingerprints + + for _, f := range audio.Files.List() { + audioFPs = append(audioFPs, f.Fingerprints...) 
+ } + + fingerprints[i] = audioFPs + } + + return fingerprints, nil +} diff --git a/pkg/audio/generate/generator.go b/pkg/audio/generate/generator.go new file mode 100644 index 0000000000..d1aa7e158a --- /dev/null +++ b/pkg/audio/generate/generator.go @@ -0,0 +1,186 @@ +// TODO(audio): update this file + +// Package generate provides functions to generate media assets from audios. +package generate + +import ( + "bytes" + "errors" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/fsutil" +) + +const ( + mp4Pattern = "*.mp4" + webpPattern = "*.webp" + jpgPattern = "*.jpg" + txtPattern = "*.txt" + vttPattern = "*.vtt" +) + +type Paths interface { + TempFile(pattern string) (*os.File, error) +} + +type MarkerPaths interface { + Paths + + GetVideoPreviewPath(checksum string, seconds int) string + GetWebpPreviewPath(checksum string, seconds int) string + GetScreenshotPath(checksum string, seconds int) string +} + +type AudioPaths interface { + Paths + + GetVideoPreviewPath(checksum string) string + GetWebpPreviewPath(checksum string) string + + GetSpriteImageFilePath(checksum string) string + GetSpriteVttFilePath(checksum string) string + + GetTranscodePath(checksum string) string +} + +type FFMpegConfig interface { + GetTranscodeInputArgs() []string + GetTranscodeOutputArgs() []string +} + +type Generator struct { + Encoder *ffmpeg.FFMpeg + FFMpegConfig FFMpegConfig + LockManager *fsutil.ReadLockManager + MarkerPaths MarkerPaths + AudioPaths AudioPaths + Overwrite bool +} + +type generateFn func(lockCtx *fsutil.LockContext, tmpFn string) error + +func (g Generator) tempFile(p Paths, pattern string) (*os.File, error) { + tmpFile, err := p.TempFile(pattern) // tmp output in case the process ends abruptly + if err != nil { + return nil, fmt.Errorf("creating temporary file: %w", err) + } + _ = tmpFile.Close() + return tmpFile, err +} + +// generateFile performs a generate operation by generating a temporary 
file using p and pattern, then +// moving it to output on success. +func (g Generator) generateFile(lockCtx *fsutil.LockContext, p Paths, pattern string, output string, generateFn generateFn) error { + tmpFile, err := g.tempFile(p, pattern) // tmp output in case the process ends abruptly + if err != nil { + return err + } + + tmpFn := tmpFile.Name() + defer func() { + _ = os.Remove(tmpFn) + }() + + if err := generateFn(lockCtx, tmpFn); err != nil { + return err + } + + // check if generated empty file + stat, err := os.Stat(tmpFn) + if err != nil { + return fmt.Errorf("error getting file stat: %w", err) + } + + if stat.Size() == 0 { + return fmt.Errorf("ffmpeg command produced no output") + } + + if err := fsutil.SafeMove(tmpFn, output); err != nil { + return fmt.Errorf("moving %s to %s failed: %w", tmpFn, output, err) + } + + return nil +} + +// generateBytes performs a generate operation by generating a temporary file using p and pattern, returns the contents, then deletes it. +func (g Generator) generateBytes(lockCtx *fsutil.LockContext, p Paths, pattern string, generateFn generateFn) ([]byte, error) { + tmpFile, err := g.tempFile(p, pattern) // tmp output in case the process ends abruptly + if err != nil { + return nil, err + } + + tmpFn := tmpFile.Name() + defer func() { + _ = os.Remove(tmpFn) + }() + + if err := generateFn(lockCtx, tmpFn); err != nil { + return nil, err + } + + defer os.Remove(tmpFn) + return os.ReadFile(tmpFn) +} + +// generate runs ffmpeg with the given args and waits for it to finish. +// Returns an error if the command fails. If the command fails, the return +// value will be of type *exec.ExitError. 
+func (g Generator) generate(ctx *fsutil.LockContext, args []string) error { + cmd := g.Encoder.Command(ctx, args) + + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + return fmt.Errorf("error starting command: %w", err) + } + + ctx.AttachCommand(cmd) + + if err := cmd.Wait(); err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + exitErr.Stderr = stderr.Bytes() + err = exitErr + } + return fmt.Errorf("error running ffmpeg command <%s>: %w", strings.Join(args, " "), err) + } + + return nil +} + +// GenerateOutput runs ffmpeg with the given args and returns it standard output. +func (g Generator) generateOutput(lockCtx *fsutil.LockContext, args []string) ([]byte, error) { + cmd := g.Encoder.Command(lockCtx, args) + + var stdout bytes.Buffer + cmd.Stdout = &stdout + + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("error starting command: %w", err) + } + + lockCtx.AttachCommand(cmd) + + if err := cmd.Wait(); err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + exitErr.Stderr = stderr.Bytes() + err = exitErr + } + return nil, fmt.Errorf("error running ffmpeg command <%s>: %w", strings.Join(args, " "), err) + } + + if stdout.Len() == 0 { + return nil, fmt.Errorf("ffmpeg command produced no output: <%s>", strings.Join(args, " ")) + } + + return stdout.Bytes(), nil +} diff --git a/pkg/audio/hash.go b/pkg/audio/hash.go new file mode 100644 index 0000000000..26bb04f7ba --- /dev/null +++ b/pkg/audio/hash.go @@ -0,0 +1,20 @@ +// TODO(audio): update this file + +package audio + +import ( + "github.com/stashapp/stash/pkg/models" +) + +// GetHash returns the hash of the file, based on the hash algorithm provided. If +// hash algorithm is MD5, then Checksum is returned. Otherwise, OSHash is returned. 
+func GetHash(f models.File, hashAlgorithm models.HashAlgorithm) string { + switch hashAlgorithm { + case models.HashAlgorithmMd5: + return f.Base().Fingerprints.GetString(models.FingerprintTypeMD5) + case models.HashAlgorithmOshash: + return f.Base().Fingerprints.GetString(models.FingerprintTypeOshash) + default: + panic("unknown hash algorithm") + } +} diff --git a/pkg/audio/import.go b/pkg/audio/import.go new file mode 100644 index 0000000000..8d2bd20b4e --- /dev/null +++ b/pkg/audio/import.go @@ -0,0 +1,575 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "fmt" + "slices" + "strings" + "time" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/json" + "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/sliceutil" + "github.com/stashapp/stash/pkg/utils" +) + +type ImporterReaderWriter interface { + models.AudioCreatorUpdater + models.ViewHistoryWriter + models.OHistoryWriter + models.CustomFieldsWriter + FindByFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) +} + +type Importer struct { + ReaderWriter ImporterReaderWriter + FileFinder models.FileFinder + StudioWriter models.StudioFinderCreator + GalleryFinder models.GalleryFinder + PerformerWriter models.PerformerFinderCreator + GroupWriter models.GroupFinderCreator + TagWriter models.TagFinderCreator + Input jsonschema.Audio + MissingRefBehaviour models.ImportMissingRefEnum + FileNamingAlgorithm models.HashAlgorithm + + ID int + audio models.Audio + customFields map[string]interface{} + coverImageData []byte + viewHistory []time.Time + oHistory []time.Time +} + +func (i *Importer) PreImport(ctx context.Context) error { + i.audio = i.audioJSONToAudio(i.Input) + + if err := i.populateFiles(ctx); err != nil { + return err + } + + if err := i.populateStudio(ctx); err != nil { + return err + } + + if err := i.populateGalleries(ctx); err != nil { + return err + } + + if err := 
i.populatePerformers(ctx); err != nil { + return err + } + + if err := i.populateTags(ctx); err != nil { + return err + } + + if err := i.populateGroups(ctx); err != nil { + return err + } + + var err error + if len(i.Input.Cover) > 0 { + i.coverImageData, err = utils.ProcessBase64Image(i.Input.Cover) + if err != nil { + return fmt.Errorf("invalid cover image: %v", err) + } + } + + i.customFields = i.Input.CustomFields + + i.populateViewHistory() + i.populateOHistory() + + return nil +} + +func (i *Importer) audioJSONToAudio(audioJSON jsonschema.Audio) models.Audio { + newAudio := models.Audio{ + Title: audioJSON.Title, + Code: audioJSON.Code, + Details: audioJSON.Details, + PerformerIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + GalleryIDs: models.NewRelatedIDs([]int{}), + Groups: models.NewRelatedGroups([]models.GroupsAudios{}), + } + + if len(audioJSON.URLs) > 0 { + newAudio.URLs = models.NewRelatedStrings(audioJSON.URLs) + } else if audioJSON.URL != "" { + newAudio.URLs = models.NewRelatedStrings([]string{audioJSON.URL}) + } + + if audioJSON.Date != "" { + d, err := models.ParseDate(audioJSON.Date) + if err == nil { + newAudio.Date = &d + } + } + if audioJSON.Rating != 0 { + newAudio.Rating = &audioJSON.Rating + } + + newAudio.Organized = audioJSON.Organized + newAudio.CreatedAt = audioJSON.CreatedAt.GetTime() + newAudio.UpdatedAt = audioJSON.UpdatedAt.GetTime() + newAudio.ResumeTime = audioJSON.ResumeTime + newAudio.PlayDuration = audioJSON.PlayDuration + + return newAudio +} + +func getHistory(historyJSON []json.JSONTime, count int, last json.JSONTime, createdAt json.JSONTime) []time.Time { + var ret []time.Time + + if len(historyJSON) > 0 { + for _, d := range historyJSON { + ret = append(ret, d.GetTime()) + } + } else if count > 0 { + createdAt := createdAt.GetTime() + for j := 0; j < count; j++ { + t := createdAt + if j+1 == count && !last.IsZero() { + // last one, use last play date + t = last.GetTime() + } + ret = 
append(ret, t) + } + } + + return ret +} + +func (i *Importer) populateViewHistory() { + i.viewHistory = getHistory( + i.Input.PlayHistory, + i.Input.PlayCount, + i.Input.LastPlayedAt, + i.Input.CreatedAt, + ) +} + +func (i *Importer) populateOHistory() { + i.oHistory = getHistory( + i.Input.OHistory, + i.Input.OCounter, + i.Input.CreatedAt, // no last o count date + i.Input.CreatedAt, + ) +} + +func (i *Importer) populateFiles(ctx context.Context) error { + files := make([]*models.AudioFile, 0) + + for _, ref := range i.Input.Files { + path := ref + f, err := i.FileFinder.FindByPath(ctx, path, true) + if err != nil { + return fmt.Errorf("error finding file: %w", err) + } + + if f == nil { + return fmt.Errorf("audio file '%s' not found", path) + } else { + files = append(files, f.(*models.AudioFile)) + } + } + + i.audio.Files = models.NewRelatedAudioFiles(files) + + return nil +} + +func (i *Importer) populateStudio(ctx context.Context) error { + if i.Input.Studio != "" { + studio, err := i.StudioWriter.FindByName(ctx, i.Input.Studio, false) + if err != nil { + return fmt.Errorf("error finding studio by name: %v", err) + } + + if studio == nil { + if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { + return fmt.Errorf("audio studio '%s' not found", i.Input.Studio) + } + + if i.MissingRefBehaviour == models.ImportMissingRefEnumIgnore { + return nil + } + + if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { + studioID, err := i.createStudio(ctx, i.Input.Studio) + if err != nil { + return err + } + i.audio.StudioID = &studioID + } + } else { + i.audio.StudioID = &studio.ID + } + } + + return nil +} + +func (i *Importer) createStudio(ctx context.Context, name string) (int, error) { + newStudio := models.NewCreateStudioInput() + newStudio.Name = name + + err := i.StudioWriter.Create(ctx, &newStudio) + if err != nil { + return 0, err + } + + return newStudio.ID, nil +} + +func (i *Importer) locateGallery(ctx context.Context, ref 
jsonschema.GalleryRef) (*models.Gallery, error) { + var galleries []*models.Gallery + var err error + switch { + case ref.FolderPath != "": + galleries, err = i.GalleryFinder.FindByPath(ctx, ref.FolderPath) + case len(ref.ZipFiles) > 0: + for _, p := range ref.ZipFiles { + galleries, err = i.GalleryFinder.FindByPath(ctx, p) + if err != nil { + break + } + + if len(galleries) > 0 { + break + } + } + case ref.Title != "": + galleries, err = i.GalleryFinder.FindUserGalleryByTitle(ctx, ref.Title) + } + + var ret *models.Gallery + if len(galleries) > 0 { + ret = galleries[0] + } + + return ret, err +} + +func (i *Importer) populateGalleries(ctx context.Context) error { + for _, ref := range i.Input.Galleries { + gallery, err := i.locateGallery(ctx, ref) + if err != nil { + return err + } + + if gallery == nil { + if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { + return fmt.Errorf("audio gallery '%s' not found", ref.String()) + } + + // we don't create galleries - just ignore + } else { + i.audio.GalleryIDs.Add(gallery.ID) + } + } + + return nil +} + +func (i *Importer) populatePerformers(ctx context.Context) error { + if len(i.Input.Performers) > 0 { + names := i.Input.Performers + performers, err := i.PerformerWriter.FindByNames(ctx, names, false) + if err != nil { + return err + } + + var pluckedNames []string + for _, performer := range performers { + if performer.Name == "" { + continue + } + pluckedNames = append(pluckedNames, performer.Name) + } + + missingPerformers := sliceutil.Filter(names, func(name string) bool { + return !slices.Contains(pluckedNames, name) + }) + + if len(missingPerformers) > 0 { + if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { + return fmt.Errorf("audio performers [%s] not found", strings.Join(missingPerformers, ", ")) + } + + if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { + createdPerformers, err := i.createPerformers(ctx, missingPerformers) + if err != nil { + return fmt.Errorf("error 
creating audio performers: %v", err) + } + + performers = append(performers, createdPerformers...) + } + + // ignore if MissingRefBehaviour set to Ignore + } + + for _, p := range performers { + i.audio.PerformerIDs.Add(p.ID) + } + } + + return nil +} + +func (i *Importer) createPerformers(ctx context.Context, names []string) ([]*models.Performer, error) { + var ret []*models.Performer + for _, name := range names { + newPerformer := models.NewPerformer() + newPerformer.Name = name + + err := i.PerformerWriter.Create(ctx, &models.CreatePerformerInput{ + Performer: &newPerformer, + }) + if err != nil { + return nil, err + } + + ret = append(ret, &newPerformer) + } + + return ret, nil +} + +func (i *Importer) populateGroups(ctx context.Context) error { + if len(i.Input.Groups) > 0 { + for _, inputGroup := range i.Input.Groups { + group, err := i.GroupWriter.FindByName(ctx, inputGroup.GroupName, false) + if err != nil { + return fmt.Errorf("error finding audio group: %v", err) + } + + var groupID int + if group == nil { + if i.MissingRefBehaviour == models.ImportMissingRefEnumFail { + return fmt.Errorf("audio group [%s] not found", inputGroup.GroupName) + } + + if i.MissingRefBehaviour == models.ImportMissingRefEnumCreate { + groupID, err = i.createGroup(ctx, inputGroup.GroupName) + if err != nil { + return fmt.Errorf("error creating audio group: %v", err) + } + } + + // ignore if MissingRefBehaviour set to Ignore + if i.MissingRefBehaviour == models.ImportMissingRefEnumIgnore { + continue + } + } else { + groupID = group.ID + } + + toAdd := models.GroupsAudios{ + GroupID: groupID, + } + + if inputGroup.AudioIndex != 0 { + index := inputGroup.AudioIndex + toAdd.AudioIndex = &index + } + + i.audio.Groups.Add(toAdd) + } + } + + return nil +} + +func (i *Importer) createGroup(ctx context.Context, name string) (int, error) { + newGroup := models.NewGroup() + newGroup.Name = name + + err := i.GroupWriter.Create(ctx, &newGroup) + if err != nil { + return 0, err + } + + 
return newGroup.ID, nil +} + +func (i *Importer) populateTags(ctx context.Context) error { + if len(i.Input.Tags) > 0 { + + tags, err := importTags(ctx, i.TagWriter, i.Input.Tags, i.MissingRefBehaviour) + if err != nil { + return err + } + + for _, p := range tags { + i.audio.TagIDs.Add(p.ID) + } + } + + return nil +} + +func (i *Importer) addViewHistory(ctx context.Context) error { + if len(i.viewHistory) > 0 { + _, err := i.ReaderWriter.AddViews(ctx, i.ID, i.viewHistory) + if err != nil { + return fmt.Errorf("error adding view date: %v", err) + } + } + + return nil +} + +func (i *Importer) addOHistory(ctx context.Context) error { + if len(i.oHistory) > 0 { + _, err := i.ReaderWriter.AddO(ctx, i.ID, i.oHistory) + if err != nil { + return fmt.Errorf("error adding o date: %v", err) + } + } + + return nil +} + +func (i *Importer) PostImport(ctx context.Context, id int) error { + if len(i.coverImageData) > 0 { + if err := i.ReaderWriter.UpdateCover(ctx, id, i.coverImageData); err != nil { + return fmt.Errorf("error setting audio images: %v", err) + } + } + + // add histories + if err := i.addViewHistory(ctx); err != nil { + return err + } + + if err := i.addOHistory(ctx); err != nil { + return err + } + + if len(i.customFields) > 0 { + if err := i.ReaderWriter.SetCustomFields(ctx, id, models.CustomFieldsInput{ + Full: i.customFields, + }); err != nil { + return fmt.Errorf("error setting audio custom fields: %v", err) + } + } + + return nil +} + +func (i *Importer) Name() string { + if i.Input.Title != "" { + return i.Input.Title + } + + if len(i.Input.Files) > 0 { + return i.Input.Files[0] + } + + return "" +} + +func (i *Importer) FindExistingID(ctx context.Context) (*int, error) { + var existing []*models.Audio + var err error + + for _, f := range i.audio.Files.List() { + existing, err = i.ReaderWriter.FindByFileID(ctx, f.ID) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + id := existing[0].ID + return &id, nil + } + } + + return nil, nil +} + 
+func (i *Importer) Create(ctx context.Context) (*int, error) { + var fileIDs []models.FileID + for _, f := range i.audio.Files.List() { + fileIDs = append(fileIDs, f.Base().ID) + } + if err := i.ReaderWriter.Create(ctx, &i.audio, fileIDs); err != nil { + return nil, fmt.Errorf("error creating audio: %v", err) + } + + id := i.audio.ID + i.ID = id + return &id, nil +} + +func (i *Importer) Update(ctx context.Context, id int) error { + audio := i.audio + audio.ID = id + i.ID = id + if err := i.ReaderWriter.Update(ctx, &audio); err != nil { + return fmt.Errorf("error updating existing audio: %v", err) + } + + return nil +} + +func importTags(ctx context.Context, tagWriter models.TagFinderCreator, names []string, missingRefBehaviour models.ImportMissingRefEnum) ([]*models.Tag, error) { + tags, err := tagWriter.FindByNames(ctx, names, false) + if err != nil { + return nil, err + } + + var pluckedNames []string + for _, tag := range tags { + pluckedNames = append(pluckedNames, tag.Name) + } + + missingTags := sliceutil.Filter(names, func(name string) bool { + return !slices.Contains(pluckedNames, name) + }) + + if len(missingTags) > 0 { + if missingRefBehaviour == models.ImportMissingRefEnumFail { + return nil, fmt.Errorf("tags [%s] not found", strings.Join(missingTags, ", ")) + } + + if missingRefBehaviour == models.ImportMissingRefEnumCreate { + createdTags, err := createTags(ctx, tagWriter, missingTags) + if err != nil { + return nil, fmt.Errorf("error creating tags: %v", err) + } + + tags = append(tags, createdTags...) 
+ } + + // ignore if MissingRefBehaviour set to Ignore + } + + return tags, nil +} + +func createTags(ctx context.Context, tagWriter models.TagCreator, names []string) ([]*models.Tag, error) { + var ret []*models.Tag + for _, name := range names { + newTag := models.NewTag() + newTag.Name = name + + err := tagWriter.Create(ctx, &models.CreateTagInput{ + Tag: &newTag, + }) + if err != nil { + return nil, err + } + + ret = append(ret, &newTag) + } + + return ret, nil +} diff --git a/pkg/audio/import_test.go b/pkg/audio/import_test.go new file mode 100644 index 0000000000..71509fd257 --- /dev/null +++ b/pkg/audio/import_test.go @@ -0,0 +1,638 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/json" + "github.com/stashapp/stash/pkg/models/jsonschema" + "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stashapp/stash/pkg/sliceutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const invalidImage = "aW1hZ2VCeXRlcw&&" + +var ( + existingStudioID = 101 + existingPerformerID = 103 + existingGroupID = 104 + existingTagID = 105 + + existingStudioName = "existingStudioName" + existingStudioErr = "existingStudioErr" + missingStudioName = "missingStudioName" + + existingPerformerName = "existingPerformerName" + existingPerformerErr = "existingPerformerErr" + missingPerformerName = "missingPerformerName" + + existingGroupName = "existingGroupName" + existingGroupErr = "existingGroupErr" + missingGroupName = "missingGroupName" + + existingTagName = "existingTagName" + existingTagErr = "existingTagErr" + missingTagName = "missingTagName" +) + +var testCtx = context.Background() + +func TestImporterPreImport(t *testing.T) { + var ( + title = "title" + code = "code" + details = "details" + url1 = "url1" + url2 = "url2" + rating = 3 + organized = true + + createdAt = time.Now().Add(-time.Hour) + 
updatedAt = time.Now().Add(-time.Minute) + + resumeTime = 1.234 + playDuration = 2.345 + ) + tests := []struct { + name string + input jsonschema.Audio + output models.Audio + }{ + { + "basic", + jsonschema.Audio{ + Title: title, + Code: code, + Details: details, + URLs: []string{url1, url2}, + Rating: rating, + Organized: organized, + CreatedAt: json.JSONTime{Time: createdAt}, + UpdatedAt: json.JSONTime{Time: updatedAt}, + ResumeTime: resumeTime, + PlayDuration: playDuration, + }, + models.Audio{ + Title: title, + Code: code, + Details: details, + URLs: models.NewRelatedStrings([]string{url1, url2}), + Rating: &rating, + Organized: organized, + CreatedAt: createdAt.Truncate(0), + UpdatedAt: updatedAt.Truncate(0), + ResumeTime: resumeTime, + PlayDuration: playDuration, + + Files: models.NewRelatedVideoFiles([]*models.VideoFile{}), + GalleryIDs: models.NewRelatedIDs([]int{}), + TagIDs: models.NewRelatedIDs([]int{}), + PerformerIDs: models.NewRelatedIDs([]int{}), + Groups: models.NewRelatedGroups([]models.GroupsAudios{}), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + i := Importer{ + Input: tt.input, + } + + if err := i.PreImport(testCtx); err != nil { + t.Errorf("PreImport() error = %v", err) + return + } + + assert.Equal(t, tt.output, i.audio) + }) + } +} + +func truncateTimes(t []time.Time) []time.Time { + return sliceutil.Map(t, func(t time.Time) time.Time { return t.Truncate(0) }) +} + +func TestImporterPreImportHistory(t *testing.T) { + var ( + playTime1 = time.Now().Add(-time.Hour * 2) + playTime2 = time.Now().Add(-time.Minute * 2) + oTime1 = time.Now().Add(-time.Hour * 3) + oTime2 = time.Now().Add(-time.Minute * 3) + ) + tests := []struct { + name string + input jsonschema.Audio + expectedPlayHistory []time.Time + expectedOHistory []time.Time + }{ + { + "basic", + jsonschema.Audio{ + PlayHistory: []json.JSONTime{ + {Time: playTime1}, + {Time: playTime2}, + }, + OHistory: []json.JSONTime{ + {Time: oTime1}, + {Time: 
oTime2}, + }, + }, + []time.Time{playTime1, playTime2}, + []time.Time{oTime1, oTime2}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + i := Importer{ + Input: tt.input, + } + + if err := i.PreImport(testCtx); err != nil { + t.Errorf("PreImport() error = %v", err) + return + } + + // convert histories to unix timestamps for comparison + eph := truncateTimes(tt.expectedPlayHistory) + vh := truncateTimes(i.viewHistory) + + eoh := truncateTimes(tt.expectedOHistory) + oh := truncateTimes(i.oHistory) + + assert.Equal(t, eph, vh, "view history mismatch") + assert.Equal(t, eoh, oh, "o history mismatch") + }) + } +} + +func TestImporterPreImportCoverImage(t *testing.T) { + i := Importer{ + Input: jsonschema.Audio{ + Cover: invalidImage, + }, + } + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + i.Input.Cover = imageBase64 + + err = i.PreImport(testCtx) + assert.Nil(t, err) +} + +func TestImporterPreImportWithStudio(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + StudioWriter: db.Studio, + Input: jsonschema.Audio{ + Studio: existingStudioName, + }, + } + + db.Studio.On("FindByName", testCtx, existingStudioName, false).Return(&models.Studio{ + ID: existingStudioID, + }, nil).Once() + db.Studio.On("FindByName", testCtx, existingStudioErr, false).Return(nil, errors.New("FindByName error")).Once() + + err := i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, existingStudioID, *i.audio.StudioID) + + i.Input.Studio = existingStudioErr + err = i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingStudio(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + StudioWriter: db.Studio, + Input: jsonschema.Audio{ + Studio: missingStudioName, + }, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + } + + db.Studio.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Times(3) + db.Studio.On("Create", testCtx, 
mock.AnythingOfType("*models.CreateStudioInput")).Run(func(args mock.Arguments) { + s := args.Get(1).(*models.CreateStudioInput) + s.Studio.ID = existingStudioID + }).Return(nil) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore + err = i.PreImport(testCtx) + assert.Nil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumCreate + err = i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, existingStudioID, *i.audio.StudioID) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingStudioCreateErr(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + StudioWriter: db.Studio, + Input: jsonschema.Audio{ + Studio: missingStudioName, + }, + MissingRefBehaviour: models.ImportMissingRefEnumCreate, + } + + db.Studio.On("FindByName", testCtx, missingStudioName, false).Return(nil, nil).Once() + db.Studio.On("Create", testCtx, mock.AnythingOfType("*models.CreateStudioInput")).Return(errors.New("Create error")) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithPerformer(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + PerformerWriter: db.Performer, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + Input: jsonschema.Audio{ + Performers: []string{ + existingPerformerName, + }, + }, + } + + db.Performer.On("FindByNames", testCtx, []string{existingPerformerName}, false).Return([]*models.Performer{ + { + ID: existingPerformerID, + Name: existingPerformerName, + }, + }, nil).Once() + db.Performer.On("FindByNames", testCtx, []string{existingPerformerErr}, false).Return(nil, errors.New("FindByNames error")).Once() + + err := i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, []int{existingPerformerID}, i.audio.PerformerIDs.List()) + + i.Input.Performers = []string{existingPerformerErr} + err = i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + 
+func TestImporterPreImportWithMissingPerformer(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + PerformerWriter: db.Performer, + Input: jsonschema.Audio{ + Performers: []string{ + missingPerformerName, + }, + }, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + } + + db.Performer.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Times(3) + db.Performer.On("Create", testCtx, mock.AnythingOfType("*models.CreatePerformerInput")).Run(func(args mock.Arguments) { + p := args.Get(1).(*models.CreatePerformerInput) + p.ID = existingPerformerID + }).Return(nil) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore + err = i.PreImport(testCtx) + assert.Nil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumCreate + err = i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, []int{existingPerformerID}, i.audio.PerformerIDs.List()) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingPerformerCreateErr(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + PerformerWriter: db.Performer, + Input: jsonschema.Audio{ + Performers: []string{ + missingPerformerName, + }, + }, + MissingRefBehaviour: models.ImportMissingRefEnumCreate, + } + + db.Performer.On("FindByNames", testCtx, []string{missingPerformerName}, false).Return(nil, nil).Once() + db.Performer.On("Create", testCtx, mock.AnythingOfType("*models.CreatePerformerInput")).Return(errors.New("Create error")) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithGroup(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + GroupWriter: db.Group, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + Input: jsonschema.Audio{ + Groups: []jsonschema.AudioGroup{ + { + GroupName: existingGroupName, + AudioIndex: 1, + }, + }, + }, + } + + db.Group.On("FindByName", testCtx, existingGroupName, 
false).Return(&models.Group{ + ID: existingGroupID, + Name: existingGroupName, + }, nil).Once() + db.Group.On("FindByName", testCtx, existingGroupErr, false).Return(nil, errors.New("FindByName error")).Once() + + err := i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, existingGroupID, i.audio.Groups.List()[0].GroupID) + + i.Input.Groups[0].GroupName = existingGroupErr + err = i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingGroup(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + GroupWriter: db.Group, + Input: jsonschema.Audio{ + Groups: []jsonschema.AudioGroup{ + { + GroupName: missingGroupName, + }, + }, + }, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + } + + db.Group.On("FindByName", testCtx, missingGroupName, false).Return(nil, nil).Times(3) + db.Group.On("Create", testCtx, mock.AnythingOfType("*models.Group")).Run(func(args mock.Arguments) { + m := args.Get(1).(*models.Group) + m.ID = existingGroupID + }).Return(nil) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore + err = i.PreImport(testCtx) + assert.Nil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumCreate + err = i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, existingGroupID, i.audio.Groups.List()[0].GroupID) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingGroupCreateErr(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + GroupWriter: db.Group, + Input: jsonschema.Audio{ + Groups: []jsonschema.AudioGroup{ + { + GroupName: missingGroupName, + }, + }, + }, + MissingRefBehaviour: models.ImportMissingRefEnumCreate, + } + + db.Group.On("FindByName", testCtx, missingGroupName, false).Return(nil, nil).Once() + db.Group.On("Create", testCtx, mock.AnythingOfType("*models.Group")).Return(errors.New("Create error")) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + 
db.AssertExpectations(t) +} + +func TestImporterPreImportWithTag(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + TagWriter: db.Tag, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + Input: jsonschema.Audio{ + Tags: []string{ + existingTagName, + }, + }, + } + + db.Tag.On("FindByNames", testCtx, []string{existingTagName}, false).Return([]*models.Tag{ + { + ID: existingTagID, + Name: existingTagName, + }, + }, nil).Once() + db.Tag.On("FindByNames", testCtx, []string{existingTagErr}, false).Return(nil, errors.New("FindByNames error")).Once() + + err := i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, []int{existingTagID}, i.audio.TagIDs.List()) + + i.Input.Tags = []string{existingTagErr} + err = i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingTag(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + TagWriter: db.Tag, + Input: jsonschema.Audio{ + Tags: []string{ + missingTagName, + }, + }, + MissingRefBehaviour: models.ImportMissingRefEnumFail, + } + + db.Tag.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Times(3) + db.Tag.On("Create", testCtx, mock.AnythingOfType("*models.CreateTagInput")).Run(func(args mock.Arguments) { + t := args.Get(1).(*models.CreateTagInput) + t.Tag.ID = existingTagID + }).Return(nil) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumIgnore + err = i.PreImport(testCtx) + assert.Nil(t, err) + + i.MissingRefBehaviour = models.ImportMissingRefEnumCreate + err = i.PreImport(testCtx) + assert.Nil(t, err) + assert.Equal(t, []int{existingTagID}, i.audio.TagIDs.List()) + + db.AssertExpectations(t) +} + +func TestImporterPreImportWithMissingTagCreateErr(t *testing.T) { + db := mocks.NewDatabase() + + i := Importer{ + TagWriter: db.Tag, + Input: jsonschema.Audio{ + Tags: []string{ + missingTagName, + }, + }, + MissingRefBehaviour: 
models.ImportMissingRefEnumCreate, + } + + db.Tag.On("FindByNames", testCtx, []string{missingTagName}, false).Return(nil, nil).Once() + db.Tag.On("Create", testCtx, mock.AnythingOfType("*models.CreateTagInput")).Return(errors.New("Create error")) + + err := i.PreImport(testCtx) + assert.NotNil(t, err) + + db.AssertExpectations(t) +} + +func TestImporterPostImport(t *testing.T) { + db := mocks.NewDatabase() + + vt := time.Now() + ot := vt.Add(time.Minute) + + var ( + okID = 1 + errViewHistoryID = 2 + errOHistoryID = 3 + errImageID = 4 + errCustomFieldsID = 5 + ) + + var ( + errImage = errors.New("error updating cover image") + errViewHistory = errors.New("error updating view history") + errOHistory = errors.New("error updating o history") + errCustomFields = errors.New("error updating custom fields") + ) + + table := []struct { + name string + importer Importer + err bool + }{ + { + name: "all set successfully", + importer: Importer{ + ID: okID, + coverImageData: []byte(imageBase64), + viewHistory: []time.Time{vt}, + oHistory: []time.Time{ot}, + customFields: customFields, + }, + err: false, + }, + { + name: "cover image set with error", + importer: Importer{ + ID: errImageID, + coverImageData: []byte(invalidImage), + }, + err: true, + }, + { + name: "view history set with error", + importer: Importer{ + ID: errViewHistoryID, + viewHistory: []time.Time{vt}, + }, + err: true, + }, + { + name: "o history set with error", + importer: Importer{ + ID: errOHistoryID, + oHistory: []time.Time{ot}, + }, + err: true, + }, + { + name: "custom fields set with error", + importer: Importer{ + ID: errCustomFieldsID, + customFields: customFields, + }, + err: true, + }, + } + + db.Audio.On("UpdateCover", testCtx, okID, []byte(imageBase64)).Return(nil).Once() + db.Audio.On("UpdateCover", testCtx, errImageID, []byte(invalidImage)).Return(errImage).Once() + db.Audio.On("AddViews", testCtx, okID, []time.Time{vt}).Return([]time.Time{vt}, nil).Once() + db.Audio.On("AddViews", testCtx, 
errViewHistoryID, []time.Time{vt}).Return(nil, errViewHistory).Once() + db.Audio.On("AddO", testCtx, okID, []time.Time{ot}).Return([]time.Time{ot}, nil).Once() + db.Audio.On("AddO", testCtx, errOHistoryID, []time.Time{ot}).Return(nil, errOHistory).Once() + db.Audio.On("SetCustomFields", testCtx, okID, models.CustomFieldsInput{ + Full: customFields, + }).Return(nil).Once() + db.Audio.On("SetCustomFields", testCtx, errCustomFieldsID, models.CustomFieldsInput{ + Full: customFields, + }).Return(errCustomFields).Once() + + for _, tt := range table { + t.Run(tt.name, func(t *testing.T) { + i := tt.importer + i.ReaderWriter = db.Audio + + err := i.PostImport(testCtx, i.ID) + + if tt.err { + assert.NotNil(t, err, "expected error but got nil") + } else { + assert.Nil(t, err, "unexpected error: %v", err) + } + }) + } +} diff --git a/pkg/audio/merge.go b/pkg/audio/merge.go new file mode 100644 index 0000000000..30104e6b36 --- /dev/null +++ b/pkg/audio/merge.go @@ -0,0 +1,123 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "fmt" + "slices" + "time" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil" +) + +type MergeOptions struct { + AudioPartial models.AudioPartial + IncludePlayHistory bool + IncludeOHistory bool +} + +func (s *Service) Merge(ctx context.Context, sourceIDs []int, destinationID int, fileDeleter *FileDeleter, options MergeOptions) error { + audioPartial := options.AudioPartial + + // ensure source ids are unique + sourceIDs = sliceutil.AppendUniques(nil, sourceIDs) + + // ensure destination is not in source list + if slices.Contains(sourceIDs, destinationID) { + return errors.New("destination audio cannot be in source list") + } + + dest, err := s.Repository.Find(ctx, destinationID) + if err != nil { + return fmt.Errorf("finding destination audio ID %d: %w", destinationID, err) + } + + sources, err := s.Repository.FindMany(ctx, sourceIDs) + if err != nil { + return fmt.Errorf("finding 
source audios: %w", err) + } + + var fileIDs []models.FileID + + for _, src := range sources { + if err := src.LoadRelationships(ctx, s.Repository); err != nil { + return fmt.Errorf("loading audio relationships from %d: %w", src.ID, err) + } + + for _, f := range src.Files.List() { + fileIDs = append(fileIDs, f.Base().ID) + } + } + + // move files to destination audio + if len(fileIDs) > 0 { + if err := s.Repository.AssignFiles(ctx, destinationID, fileIDs); err != nil { + return fmt.Errorf("moving files to destination audio: %w", err) + } + + // if audio didn't already have a primary file, then set it now + if dest.PrimaryFileID == nil { + audioPartial.PrimaryFileID = &fileIDs[0] + } else { + // don't allow changing primary file ID from the input values + audioPartial.PrimaryFileID = nil + } + } + + if _, err := s.Repository.UpdatePartial(ctx, destinationID, audioPartial); err != nil { + return fmt.Errorf("updating audio: %w", err) + } + + // merge play history + if options.IncludePlayHistory { + var allDates []time.Time + for _, src := range sources { + thisDates, err := s.Repository.GetViewDates(ctx, src.ID) + if err != nil { + return fmt.Errorf("getting view dates for audio %d: %w", src.ID, err) + } + + allDates = append(allDates, thisDates...) + } + + if len(allDates) > 0 { + if _, err := s.Repository.AddViews(ctx, destinationID, allDates); err != nil { + return fmt.Errorf("adding view dates to audio %d: %w", destinationID, err) + } + } + } + + // merge o history + if options.IncludeOHistory { + var allDates []time.Time + for _, src := range sources { + thisDates, err := s.Repository.GetODates(ctx, src.ID) + if err != nil { + return fmt.Errorf("getting o dates for audio %d: %w", src.ID, err) + } + + allDates = append(allDates, thisDates...) 
+ } + + if len(allDates) > 0 { + if _, err := s.Repository.AddO(ctx, destinationID, allDates); err != nil { + return fmt.Errorf("adding o dates to audio %d: %w", destinationID, err) + } + } + } + + // delete old audios + for _, src := range sources { + const deleteGenerated = true + const deleteFile = false + const destroyFileEntry = false + if err := s.Destroy(ctx, src, fileDeleter, deleteGenerated, deleteFile, destroyFileEntry); err != nil { + return fmt.Errorf("deleting audio %d: %w", src.ID, err) + } + } + + return nil +} diff --git a/pkg/audio/migrate_hash.go b/pkg/audio/migrate_hash.go new file mode 100644 index 0000000000..7035613ce9 --- /dev/null +++ b/pkg/audio/migrate_hash.go @@ -0,0 +1,111 @@ +// TODO(audio): update this file + +package audio + +import ( + "bytes" + "os" + "path/filepath" + + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models/paths" +) + +func MigrateHash(p *paths.Paths, oldHash string, newHash string) { + oldPath := filepath.Join(p.Generated.Markers, oldHash) + newPath := filepath.Join(p.Generated.Markers, newHash) + migrateAudioFiles(oldPath, newPath) + + audioPaths := p.Audio + oldPath = audioPaths.GetVideoPreviewPath(oldHash) + newPath = audioPaths.GetVideoPreviewPath(newHash) + migrateAudioFiles(oldPath, newPath) + + oldPath = audioPaths.GetWebpPreviewPath(oldHash) + newPath = audioPaths.GetWebpPreviewPath(newHash) + migrateAudioFiles(oldPath, newPath) + + oldPath = audioPaths.GetTranscodePath(oldHash) + newPath = audioPaths.GetTranscodePath(newHash) + migrateAudioFiles(oldPath, newPath) + + oldVttPath := audioPaths.GetSpriteVttFilePath(oldHash) + newVttPath := audioPaths.GetSpriteVttFilePath(newHash) + migrateAudioFiles(oldVttPath, newVttPath) + + oldPath = audioPaths.GetSpriteImageFilePath(oldHash) + newPath = audioPaths.GetSpriteImageFilePath(newHash) + migrateAudioFiles(oldPath, newPath) + migrateVttFile(newVttPath, oldPath, newPath) + + oldPath = 
audioPaths.GetInteractiveHeatmapPath(oldHash) + newPath = audioPaths.GetInteractiveHeatmapPath(newHash) + migrateAudioFiles(oldPath, newPath) + + // #3986 - migrate audio marker files + markerPaths := p.AudioMarkers + oldPath = markerPaths.GetFolderPath(oldHash) + newPath = markerPaths.GetFolderPath(newHash) + migrateAudioFolder(oldPath, newPath) +} + +func migrateAudioFiles(oldName, newName string) { + oldExists, err := fsutil.FileExists(oldName) + if err != nil && !os.IsNotExist(err) { + logger.Errorf("Error checking existence of %s: %s", oldName, err.Error()) + return + } + + if oldExists { + logger.Infof("renaming %s to %s", oldName, newName) + if err := os.Rename(oldName, newName); err != nil { + logger.Errorf("error renaming %s to %s: %s", oldName, newName, err.Error()) + } + } +} + +// #2481: migrate vtt file contents in addition to renaming +func migrateVttFile(vttPath, oldSpritePath, newSpritePath string) { + // #3356 - don't try to migrate if the file doesn't exist + exists, err := fsutil.FileExists(vttPath) + if err != nil && !os.IsNotExist(err) { + logger.Errorf("Error checking existence of %s: %s", vttPath, err.Error()) + return + } + + if !exists { + return + } + + contents, err := os.ReadFile(vttPath) + if err != nil { + logger.Errorf("Error reading %s for vtt migration: %v", vttPath, err) + return + } + + oldSpriteBasename := filepath.Base(oldSpritePath) + newSpriteBasename := filepath.Base(newSpritePath) + + contents = bytes.ReplaceAll(contents, []byte(oldSpriteBasename), []byte(newSpriteBasename)) + + if err := os.WriteFile(vttPath, contents, 0644); err != nil { + logger.Errorf("Error writing %s for vtt migration: %v", vttPath, err) + return + } +} + +func migrateAudioFolder(oldName, newName string) { + oldExists, err := fsutil.DirExists(oldName) + if err != nil && !os.IsNotExist(err) { + logger.Errorf("Error checking existence of %s: %s", oldName, err.Error()) + return + } + + if oldExists { + logger.Infof("renaming %s to %s", oldName, newName) + 
if err := os.Rename(oldName, newName); err != nil { + logger.Errorf("error renaming %s to %s: %s", oldName, newName, err.Error()) + } + } +} diff --git a/pkg/audio/query.go b/pkg/audio/query.go new file mode 100644 index 0000000000..56226c0782 --- /dev/null +++ b/pkg/audio/query.go @@ -0,0 +1,160 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/stashapp/stash/pkg/job" + "github.com/stashapp/stash/pkg/models" +) + +// QueryOptions returns a AudioQueryOptions populated with the provided filters. +func QueryOptions(audioFilter *models.AudioFilterType, findFilter *models.FindFilterType, count bool) models.AudioQueryOptions { + return models.AudioQueryOptions{ + QueryOptions: models.QueryOptions{ + FindFilter: findFilter, + Count: count, + }, + AudioFilter: audioFilter, + } +} + +// QueryWithCount queries for audios, returning the audio objects and the total count. +func QueryWithCount(ctx context.Context, qb models.AudioQueryer, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType) ([]*models.Audio, int, error) { + // this was moved from the queryBuilder code + // left here so that calling functions can reference this instead + result, err := qb.Query(ctx, QueryOptions(audioFilter, findFilter, true)) + if err != nil { + return nil, 0, err + } + + audios, err := result.Resolve(ctx) + if err != nil { + return nil, 0, err + } + + return audios, result.Count, nil +} + +// Query queries for audios using the provided filters. 
+func Query(ctx context.Context, qb models.AudioQueryer, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType) ([]*models.Audio, error) { + result, err := qb.Query(ctx, QueryOptions(audioFilter, findFilter, false)) + if err != nil { + return nil, err + } + + audios, err := result.Resolve(ctx) + if err != nil { + return nil, err + } + + return audios, nil +} + +func BatchProcess(ctx context.Context, reader models.AudioQueryer, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType, fn func(audio *models.Audio) error) error { + const batchSize = 1000 + + if findFilter == nil { + findFilter = &models.FindFilterType{} + } + + page := 1 + perPage := batchSize + findFilter.Page = &page + findFilter.PerPage = &perPage + + for more := true; more; { + if job.IsCancelled(ctx) { + return nil + } + + audios, err := Query(ctx, reader, audioFilter, findFilter) + if err != nil { + return fmt.Errorf("error querying for audios: %w", err) + } + + for _, audio := range audios { + if err := fn(audio); err != nil { + return err + } + } + + if len(audios) != batchSize { + more = false + } else { + *findFilter.Page++ + } + } + + return nil +} + +// FilterFromPaths creates a AudioFilterType that filters using the provided +// paths. 
+func FilterFromPaths(paths []string) *models.AudioFilterType { + ret := &models.AudioFilterType{} + or := ret + sep := string(filepath.Separator) + + for _, p := range paths { + if !strings.HasSuffix(p, sep) { + p += sep + } + + if ret.Path == nil { + or = ret + } else { + newOr := &models.AudioFilterType{} + or.Or = newOr + or = newOr + } + + or.Path = &models.StringCriterionInput{ + Modifier: models.CriterionModifierEquals, + Value: p + "%", + } + } + + return ret +} + +func CountByStudioID(ctx context.Context, r models.AudioQueryer, id int, depth *int) (int, error) { + filter := &models.AudioFilterType{ + Studios: &models.HierarchicalMultiCriterionInput{ + Value: []string{strconv.Itoa(id)}, + Modifier: models.CriterionModifierIncludes, + Depth: depth, + }, + } + + return r.QueryCount(ctx, filter, nil) +} + +func CountByTagID(ctx context.Context, r models.AudioQueryer, id int, depth *int) (int, error) { + filter := &models.AudioFilterType{ + Tags: &models.HierarchicalMultiCriterionInput{ + Value: []string{strconv.Itoa(id)}, + Modifier: models.CriterionModifierIncludes, + Depth: depth, + }, + } + + return r.QueryCount(ctx, filter, nil) +} + +func CountByGroupID(ctx context.Context, r models.AudioQueryer, id int, depth *int) (int, error) { + filter := &models.AudioFilterType{ + Groups: &models.HierarchicalMultiCriterionInput{ + Value: []string{strconv.Itoa(id)}, + Modifier: models.CriterionModifierIncludes, + Depth: depth, + }, + } + + return r.QueryCount(ctx, filter, nil) +} diff --git a/pkg/audio/scan.go b/pkg/audio/scan.go new file mode 100644 index 0000000000..c9d0cb7dc0 --- /dev/null +++ b/pkg/audio/scan.go @@ -0,0 +1,217 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/stashapp/stash/pkg/file/audio" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/paths" + "github.com/stashapp/stash/pkg/plugin" 
+ "github.com/stashapp/stash/pkg/plugin/hook" + "github.com/stashapp/stash/pkg/txn" +) + +var ( + ErrNotAudioFile = errors.New("not a audio file") + + // fingerprint types to match with + // only try to match by data fingerprints, _not_ perceptual fingerprints + matchableFingerprintTypes = []string{models.FingerprintTypeOshash, models.FingerprintTypeMD5} +) + +type ScanCreatorUpdater interface { + FindByFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) + FindByFingerprints(ctx context.Context, fp []models.Fingerprint) ([]*models.Audio, error) + GetFiles(ctx context.Context, relatedID int) ([]*models.AudioFile, error) + + Create(ctx context.Context, newAudio *models.Audio, fileIDs []models.FileID) error + UpdatePartial(ctx context.Context, id int, updatedAudio models.AudioPartial) (*models.Audio, error) + AddFileID(ctx context.Context, id int, fileID models.FileID) error +} + +type ScanGalleryFinderUpdater interface { + FindByPath(ctx context.Context, p string) ([]*models.Gallery, error) + AddAudioIDs(ctx context.Context, galleryID int, audioIDs []int) error +} + +type ScanGenerator interface { + Generate(ctx context.Context, s *models.Audio, f *models.AudioFile) error +} + +type ScanHandler struct { + CreatorUpdater ScanCreatorUpdater + GalleryFinderUpdater ScanGalleryFinderUpdater + + ScanGenerator ScanGenerator + CaptionUpdater audio.CaptionUpdater + PluginCache *plugin.Cache + + FileNamingAlgorithm models.HashAlgorithm + Paths *paths.Paths +} + +func (h *ScanHandler) validate() error { + if h.CreatorUpdater == nil { + return errors.New("CreatorUpdater is required") + } + if h.ScanGenerator == nil { + return errors.New("ScanGenerator is required") + } + if h.CaptionUpdater == nil { + return errors.New("CaptionUpdater is required") + } + if !h.FileNamingAlgorithm.IsValid() { + return errors.New("FileNamingAlgorithm is required") + } + if h.Paths == nil { + return errors.New("Paths is required") + } + + return nil +} + +func (h 
*ScanHandler) Handle(ctx context.Context, f models.File, oldFile models.File) error { + if err := h.validate(); err != nil { + return err + } + + AudioFile, ok := f.(*models.AudioFile) + if !ok { + return ErrNotAudioFile + } + + if oldFile != nil { + if err := audio.CleanCaptions(ctx, AudioFile, nil, h.CaptionUpdater); err != nil { + return fmt.Errorf("cleaning captions: %w", err) + } + } + + // try to match the file to a audio + existing, err := h.CreatorUpdater.FindByFileID(ctx, f.Base().ID) + if err != nil { + return fmt.Errorf("finding existing audio: %w", err) + } + + if len(existing) == 0 { + // try also to match file by fingerprints + existing, err = h.CreatorUpdater.FindByFingerprints(ctx, AudioFile.Fingerprints.Filter(matchableFingerprintTypes...)) + if err != nil { + return fmt.Errorf("finding existing audio by fingerprints: %w", err) + } + } + + if len(existing) > 0 { + updateExisting := oldFile != nil + if err := h.associateExisting(ctx, existing, AudioFile, updateExisting); err != nil { + return err + } + } else { + // create a new audio + newAudio := models.NewAudio() + + logger.Infof("%s doesn't exist. 
Creating new audio...", f.Base().Path) + + if err := h.CreatorUpdater.Create(ctx, &newAudio, []models.FileID{AudioFile.ID}); err != nil { + return fmt.Errorf("creating new audio: %w", err) + } + + h.PluginCache.RegisterPostHooks(ctx, newAudio.ID, hook.AudioCreatePost, nil, nil) + + existing = []*models.Audio{&newAudio} + } + + if oldFile != nil { + // migrate hashes from the old file to the new + oldHash := GetHash(oldFile, h.FileNamingAlgorithm) + newHash := GetHash(f, h.FileNamingAlgorithm) + + if oldHash != "" && newHash != "" && oldHash != newHash { + MigrateHash(h.Paths, oldHash, newHash) + } + } + + if err := h.associateGallery(ctx, existing, f); err != nil { + return err + } + + // do this after the commit so that cover generation doesn't hold up the transaction + txn.AddPostCommitHook(ctx, func(ctx context.Context) { + for _, s := range existing { + if err := h.ScanGenerator.Generate(ctx, s, AudioFile); err != nil { + // just log if cover generation fails. We can try again on rescan + logger.Errorf("Error generating content for %s: %v", AudioFile.Path, err) + } + } + }) + + return nil +} + +func (h *ScanHandler) associateExisting(ctx context.Context, existing []*models.Audio, f *models.AudioFile, updateExisting bool) error { + for _, s := range existing { + if err := s.LoadFiles(ctx, h.CreatorUpdater); err != nil { + return err + } + + found := false + for _, sf := range s.Files.List() { + if sf.ID == f.ID { + found = true + break + } + } + + if !found { + logger.Infof("Adding %s to audio %s", f.Path, s.DisplayName()) + + if err := h.CreatorUpdater.AddFileID(ctx, s.ID, f.ID); err != nil { + return fmt.Errorf("adding file to audio: %w", err) + } + } + + if !found || updateExisting { + // update updated_at time when file association or content changes + audioPartial := models.NewAudioPartial() + if _, err := h.CreatorUpdater.UpdatePartial(ctx, s.ID, audioPartial); err != nil { + return fmt.Errorf("updating audio: %w", err) + } + + 
h.PluginCache.RegisterPostHooks(ctx, s.ID, hook.AudioUpdatePost, nil, nil) + } + } + + return nil +} + +func (h *ScanHandler) associateGallery(ctx context.Context, existing []*models.Audio, f models.File) error { + audioIDs := make([]int, len(existing)) + for i, s := range existing { + audioIDs[i] = s.ID + } + + path := f.Base().Path + zipPath := strings.TrimSuffix(path, filepath.Ext(path)) + ".zip" + + // find galleries with a file that matches + galleries, err := h.GalleryFinderUpdater.FindByPath(ctx, zipPath) + if err != nil { + return err + } + + for _, gallery := range galleries { + // found related Audio + logger.Infof("associate: Audio %s is related to gallery: %d", path, gallery.ID) + if err := h.GalleryFinderUpdater.AddAudioIDs(ctx, gallery.ID, audioIDs); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/audio/scan_test.go b/pkg/audio/scan_test.go new file mode 100644 index 0000000000..c25c2b8f2f --- /dev/null +++ b/pkg/audio/scan_test.go @@ -0,0 +1,116 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "testing" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stashapp/stash/pkg/plugin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestAssociateExisting_UpdatePartialOnContentChange(t *testing.T) { + const ( + testAudioID = 1 + testFileID = 100 + ) + + existingFile := &models.VideoFile{ + BaseFile: &models.BaseFile{ID: models.FileID(testFileID), Path: "test.mp4"}, + } + + makeAudio := func() *models.Audio { + return &models.Audio{ + ID: testAudioID, + Files: models.NewRelatedVideoFiles([]*models.VideoFile{existingFile}), + } + } + + tests := []struct { + name string + updateExisting bool + expectUpdate bool + }{ + { + name: "calls UpdatePartial when file content changed", + updateExisting: true, + expectUpdate: true, + }, + { + name: "skips UpdatePartial when file unchanged and already associated", + 
updateExisting: false, + expectUpdate: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := mocks.NewDatabase() + db.Audio.On("GetFiles", mock.Anything, testAudioID).Return([]*models.VideoFile{existingFile}, nil) + + if tt.expectUpdate { + db.Audio.On("UpdatePartial", mock.Anything, testAudioID, mock.Anything). + Return(&models.Audio{ID: testAudioID}, nil) + } + + h := &ScanHandler{ + CreatorUpdater: db.Audio, + PluginCache: &plugin.Cache{}, + } + + db.WithTxnCtx(func(ctx context.Context) { + err := h.associateExisting(ctx, []*models.Audio{makeAudio()}, existingFile, tt.updateExisting) + assert.NoError(t, err) + }) + + if tt.expectUpdate { + db.Audio.AssertCalled(t, "UpdatePartial", mock.Anything, testAudioID, mock.Anything) + } else { + db.Audio.AssertNotCalled(t, "UpdatePartial", mock.Anything, mock.Anything, mock.Anything) + } + }) + } +} + +func TestAssociateExisting_UpdatePartialOnNewFile(t *testing.T) { + const ( + testAudioID = 1 + existFileID = 100 + newFileID = 200 + ) + + existingFile := &models.VideoFile{ + BaseFile: &models.BaseFile{ID: models.FileID(existFileID), Path: "existing.mp4"}, + } + newFile := &models.VideoFile{ + BaseFile: &models.BaseFile{ID: models.FileID(newFileID), Path: "new.mp4"}, + } + + audio := &models.Audio{ + ID: testAudioID, + Files: models.NewRelatedVideoFiles([]*models.VideoFile{existingFile}), + } + + db := mocks.NewDatabase() + db.Audio.On("GetFiles", mock.Anything, testAudioID).Return([]*models.VideoFile{existingFile}, nil) + db.Audio.On("AddFileID", mock.Anything, testAudioID, models.FileID(newFileID)).Return(nil) + db.Audio.On("UpdatePartial", mock.Anything, testAudioID, mock.Anything). 
+ Return(&models.Audio{ID: testAudioID}, nil) + + h := &ScanHandler{ + CreatorUpdater: db.Audio, + PluginCache: &plugin.Cache{}, + } + + db.WithTxnCtx(func(ctx context.Context) { + err := h.associateExisting(ctx, []*models.Audio{audio}, newFile, false) + assert.NoError(t, err) + }) + + db.Audio.AssertCalled(t, "AddFileID", mock.Anything, testAudioID, models.FileID(newFileID)) + db.Audio.AssertCalled(t, "UpdatePartial", mock.Anything, testAudioID, mock.Anything) +} diff --git a/pkg/audio/service.go b/pkg/audio/service.go new file mode 100644 index 0000000000..fc45b8a47d --- /dev/null +++ b/pkg/audio/service.go @@ -0,0 +1,25 @@ +// TODO(audio): update this file + +// Package audio provides the application logic for audio functionality. +// Most functionality is provided by [Service]. +package audio + +import ( + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/paths" + "github.com/stashapp/stash/pkg/plugin" +) + +type Config interface { + GetAudioFileNamingAlgorithm() models.HashAlgorithm +} + +type Service struct { + File models.FileReaderWriter + Repository models.AudioReaderWriter + MarkerRepository models.AudioMarkerReaderWriter + PluginCache *plugin.Cache + + Paths *paths.Paths + Config Config +} diff --git a/pkg/audio/update.go b/pkg/audio/update.go new file mode 100644 index 0000000000..d3158fd96f --- /dev/null +++ b/pkg/audio/update.go @@ -0,0 +1,130 @@ +// TODO(audio): update this file + +package audio + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/utils" +) + +var ErrEmptyUpdater = errors.New("no fields have been set") + +// UpdateSet is used to update a audio and its relationships. +type UpdateSet struct { + ID int + + Partial models.AudioPartial + + // in future these could be moved into a separate struct and reused + // for a Creator struct + + // Not set if nil. 
Set to []byte{} to clear existing + CoverImage []byte +} + +// IsEmpty returns true if there is nothing to update. +func (u *UpdateSet) IsEmpty() bool { + withoutID := u.Partial + + return withoutID == models.AudioPartial{} && + u.CoverImage == nil +} + +// Update updates a audio by updating the fields in the Partial field, then +// updates non-nil relationships. Returns an error if there is no work to +// be done. +func (u *UpdateSet) Update(ctx context.Context, qb models.AudioUpdater) (*models.Audio, error) { + if u.IsEmpty() { + return nil, ErrEmptyUpdater + } + + partial := u.Partial + updatedAt := time.Now() + partial.UpdatedAt = models.NewOptionalTime(updatedAt) + + ret, err := qb.UpdatePartial(ctx, u.ID, partial) + if err != nil { + return nil, fmt.Errorf("error updating audio: %w", err) + } + + if u.CoverImage != nil { + if err := qb.UpdateCover(ctx, u.ID, u.CoverImage); err != nil { + return nil, fmt.Errorf("error updating audio cover: %w", err) + } + } + + return ret, nil +} + +// UpdateInput converts the UpdateSet into AudioUpdateInput for hook firing purposes. 
+func (u UpdateSet) UpdateInput() models.AudioUpdateInput { + // ensure the partial ID is set + ret := u.Partial.UpdateInput(u.ID) + + if u.CoverImage != nil { + // convert back to base64 + data := utils.GetBase64StringFromData(u.CoverImage) + ret.CoverImage = &data + } + + return ret +} + +func AddPerformer(ctx context.Context, qb models.AudioUpdater, o *models.Audio, performerID int) error { + audioPartial := models.NewAudioPartial() + audioPartial.PerformerIDs = &models.UpdateIDs{ + IDs: []int{performerID}, + Mode: models.RelationshipUpdateModeAdd, + } + _, err := qb.UpdatePartial(ctx, o.ID, audioPartial) + return err +} + +func AddTag(ctx context.Context, qb models.AudioUpdater, o *models.Audio, tagID int) error { + audioPartial := models.NewAudioPartial() + audioPartial.TagIDs = &models.UpdateIDs{ + IDs: []int{tagID}, + Mode: models.RelationshipUpdateModeAdd, + } + _, err := qb.UpdatePartial(ctx, o.ID, audioPartial) + return err +} + +func AddGallery(ctx context.Context, qb models.AudioUpdater, o *models.Audio, galleryID int) error { + audioPartial := models.NewAudioPartial() + audioPartial.TagIDs = &models.UpdateIDs{ + IDs: []int{galleryID}, + Mode: models.RelationshipUpdateModeAdd, + } + _, err := qb.UpdatePartial(ctx, o.ID, audioPartial) + return err +} + +func (s *Service) AssignFile(ctx context.Context, audioID int, fileID models.FileID) error { + // ensure file isn't a primary file and that it is a video file + f, err := s.File.Find(ctx, fileID) + if err != nil { + return err + } + + ff := f[0] + if _, ok := ff.(*models.VideoFile); !ok { + return fmt.Errorf("%s is not a video file", ff.Base().Path) + } + + isPrimary, err := s.File.IsPrimary(ctx, fileID) + if err != nil { + return err + } + + if isPrimary { + return errors.New("cannot reassign primary file") + } + + return s.Repository.AssignFiles(ctx, audioID, []models.FileID{fileID}) +} diff --git a/pkg/audio/update_test.go b/pkg/audio/update_test.go new file mode 100644 index 0000000000..8d5410c6de --- 
/dev/null +++ b/pkg/audio/update_test.go @@ -0,0 +1,273 @@ +// TODO(audio): update this file + +package audio + +import ( + "errors" + "strconv" + "testing" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/models/mocks" + "github.com/stashapp/stash/pkg/sliceutil/intslice" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestUpdater_IsEmpty(t *testing.T) { + organized := true + ids := []int{1} + cover := []byte{1} + + tests := []struct { + name string + u *UpdateSet + want bool + }{ + { + "empty", + &UpdateSet{}, + true, + }, + { + "partial set", + &UpdateSet{ + Partial: models.AudioPartial{ + Organized: models.NewOptionalBool(organized), + }, + }, + false, + }, + { + "performer set", + &UpdateSet{ + Partial: models.AudioPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: ids, + Mode: models.RelationshipUpdateModeSet, + }, + }, + }, + false, + }, + { + "tags set", + &UpdateSet{ + Partial: models.AudioPartial{ + TagIDs: &models.UpdateIDs{ + IDs: ids, + Mode: models.RelationshipUpdateModeSet, + }, + }, + }, + false, + }, + { + "cover set", + &UpdateSet{ + CoverImage: cover, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.u.IsEmpty(); got != tt.want { + t.Errorf("Updater.IsEmpty() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestUpdater_Update(t *testing.T) { + const ( + audioID = iota + 1 + badUpdateID + badPerformersID + badTagsID + badCoverID + performerID + tagID + ) + + performerIDs := []int{performerID} + tagIDs := []int{tagID} + + title := "title" + cover := []byte("cover") + + validAudio := &models.Audio{} + + updateErr := errors.New("error updating") + + db := mocks.NewDatabase() + + db.Audio.On("UpdatePartial", testCtx, mock.MatchedBy(func(id int) bool { + return id != badUpdateID + }), mock.Anything).Return(validAudio, nil) + db.Audio.On("UpdatePartial", testCtx, badUpdateID, mock.Anything).Return(nil, updateErr) + + 
db.Audio.On("UpdateCover", testCtx, audioID, cover).Return(nil).Once() + db.Audio.On("UpdateCover", testCtx, badCoverID, cover).Return(updateErr).Once() + + tests := []struct { + name string + u *UpdateSet + wantNil bool + wantErr bool + }{ + { + "empty", + &UpdateSet{ + ID: audioID, + }, + true, + true, + }, + { + "update all", + &UpdateSet{ + ID: audioID, + Partial: models.AudioPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: performerIDs, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: tagIDs, + Mode: models.RelationshipUpdateModeSet, + }, + }, + CoverImage: cover, + }, + false, + false, + }, + { + "update fields only", + &UpdateSet{ + ID: audioID, + Partial: models.AudioPartial{ + Title: models.NewOptionalString(title), + }, + }, + false, + false, + }, + { + "error updating audio", + &UpdateSet{ + ID: badUpdateID, + Partial: models.AudioPartial{ + Title: models.NewOptionalString(title), + }, + }, + true, + true, + }, + { + "error updating cover", + &UpdateSet{ + ID: badCoverID, + CoverImage: cover, + }, + true, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.u.Update(testCtx, db.Audio) + if (err != nil) != tt.wantErr { + t.Errorf("Updater.Update() error = %v, wantErr %v", err, tt.wantErr) + return + } + if (got == nil) != tt.wantNil { + t.Errorf("Updater.Update() = %v, want %v", got, tt.wantNil) + } + }) + } + + db.AssertExpectations(t) +} + +func TestUpdateSet_UpdateInput(t *testing.T) { + const ( + audioID = iota + 1 + badUpdateID + badPerformersID + badTagsID + badCoverID + performerID + tagID + ) + + audioIDStr := strconv.Itoa(audioID) + + performerIDs := []int{performerID} + performerIDStrs := intslice.IntSliceToStringSlice(performerIDs) + tagIDs := []int{tagID} + tagIDStrs := intslice.IntSliceToStringSlice(tagIDs) + + title := "title" + cover := []byte("cover") + coverB64 := "Y292ZXI=" + + tests := []struct { + name string + u UpdateSet + want models.AudioUpdateInput 
+ }{ + { + "empty", + UpdateSet{ + ID: audioID, + }, + models.AudioUpdateInput{ + ID: audioIDStr, + }, + }, + { + "update all", + UpdateSet{ + ID: audioID, + Partial: models.AudioPartial{ + PerformerIDs: &models.UpdateIDs{ + IDs: performerIDs, + Mode: models.RelationshipUpdateModeSet, + }, + TagIDs: &models.UpdateIDs{ + IDs: tagIDs, + Mode: models.RelationshipUpdateModeSet, + }, + }, + CoverImage: cover, + }, + models.AudioUpdateInput{ + ID: audioIDStr, + PerformerIds: performerIDStrs, + TagIds: tagIDStrs, + CoverImage: &coverB64, + }, + }, + { + "update fields only", + UpdateSet{ + ID: audioID, + Partial: models.AudioPartial{ + Title: models.NewOptionalString(title), + }, + }, + models.AudioUpdateInput{ + ID: audioIDStr, + Title: &title, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.u.UpdateInput() + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/ffmpeg/ffprobe.go b/pkg/ffmpeg/ffprobe.go index 59f8ed218b..bb52aa0261 100644 --- a/pkg/ffmpeg/ffprobe.go +++ b/pkg/ffmpeg/ffprobe.go @@ -118,11 +118,11 @@ type VideoFile struct { // TranscodeScale calculates the dimension scaling for a transcode, where maxSize is the maximum size of the longest dimension of the input video. // If no scaling is required, then returns 0, 0. // Returns -2 for the dimension that will scale to maintain aspect ratio. 
-func (v *VideoFile) TranscodeScale(maxSize int) (int, int) { +func (a *VideoFile) TranscodeScale(maxSize int) (int, int) { // get the smaller dimension of the video file - videoSize := v.Height - if v.Width < videoSize { - videoSize = v.Width + videoSize := a.Height + if a.Width < videoSize { + videoSize = a.Width } // if our streaming resolution is larger than the video dimension @@ -134,7 +134,7 @@ func (v *VideoFile) TranscodeScale(maxSize int) (int, int) { // we're setting either the width or height // we'll set the smaller dimesion - if v.Width > v.Height { + if a.Width > a.Height { // set the height return -2, maxSize } @@ -365,23 +365,150 @@ func isRotated(s *FFProbeStream) bool { return false } -func (v *VideoFile) getAudioStream() *FFProbeStream { - index := v.getStreamIndex("audio", v.JSON) +func (a *VideoFile) getAudioStream() *FFProbeStream { + index := a.getStreamIndex("audio", a.JSON) if index != -1 { - return &v.JSON.Streams[index] + return &a.JSON.Streams[index] } return nil } -func (v *VideoFile) getVideoStream() *FFProbeStream { - index := v.getStreamIndex("video", v.JSON) +func (a *VideoFile) getVideoStream() *FFProbeStream { + index := a.getStreamIndex("video", a.JSON) if index != -1 { - return &v.JSON.Streams[index] + return &a.JSON.Streams[index] } return nil } -func (v *VideoFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int { +func (a *VideoFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int { + ret := -1 + for i, stream := range probeJSON.Streams { + // skip cover art/thumbnails + if stream.CodecType == fileType && stream.Disposition.AttachedPic == 0 { + // prefer default stream + if stream.Disposition.Default == 1 { + return i + } + + // backwards compatible behaviour - fallback to first matching stream + if ret == -1 { + ret = i + } + } + } + + return ret +} + +// AUDIO + +// AudioFile represents the ffprobe output for a audio file. 
+type AudioFile struct { + JSON FFProbeJSON + AudioStream *FFProbeStream + + Path string + Title string + Comment string + Container string + // FileDuration is the declared (meta-data) duration of the *file*. + // In most cases (sprites, previews, etc.) we actually care about the duration of the audio stream specifically, + // because those two can differ slightly (e.g. audio stream longer than the audio stream, making the whole file + // longer). + FileDuration float64 + AudioStreamDuration float64 + StartTime float64 + Bitrate int64 + Size int64 + CreationTime time.Time + + AudioCodec string + SampleRate float64 +} + +// NewAudioFile runs ffprobe on the given path and returns a AudioFile. +func (f *FFProbe) NewAudioFile(audioPath string) (*AudioFile, error) { + args := []string{ + "-v", + "quiet", + "-print_format", "json", + "-show_format", + "-show_streams", + "-show_error", + } + + // show_entries stream_side_data=rotation requires 5.x or later ffprobe + if f.version.major >= 5 { + args = append(args, "-show_entries", "stream_side_data=rotation") + } + + args = append(args, audioPath) + + cmd := stashExec.Command(f.path, args...) 
+ out, err := cmd.Output() + + if err != nil { + return nil, fmt.Errorf("FFProbe encountered an error with <%s>.\nError JSON:\n%s\nError: %s", audioPath, string(out), err.Error()) + } + + probeJSON := &FFProbeJSON{} + if err := json.Unmarshal(out, probeJSON); err != nil { + return nil, fmt.Errorf("error unmarshalling audio data for <%s>: %s", audioPath, err.Error()) + } + + return parseAudio(audioPath, probeJSON) +} + +func parseAudio(filePath string, probeJSON *FFProbeJSON) (*AudioFile, error) { + if probeJSON == nil { + return nil, fmt.Errorf("failed to get ffprobe json for <%s>", filePath) + } + + result := &AudioFile{} + result.JSON = *probeJSON + + if result.JSON.Error.Code != 0 { + return nil, fmt.Errorf("ffprobe error code %d: %s", result.JSON.Error.Code, result.JSON.Error.String) + } + + result.Path = filePath + result.Title = probeJSON.Format.Tags.Title + + result.Comment = probeJSON.Format.Tags.Comment + result.Bitrate, _ = strconv.ParseInt(probeJSON.Format.BitRate, 10, 64) + + result.Container = probeJSON.Format.FormatName + duration, _ := strconv.ParseFloat(probeJSON.Format.Duration, 64) + result.FileDuration = math.Round(duration*100) / 100 + fileStat, err := os.Stat(filePath) + if err != nil { + statErr := fmt.Errorf("error statting file <%s>: %w", filePath, err) + logger.Errorf("%v", statErr) + return nil, statErr + } + result.Size = fileStat.Size() + result.StartTime, _ = strconv.ParseFloat(probeJSON.Format.StartTime, 64) + result.CreationTime = probeJSON.Format.Tags.CreationTime.Time + + audioStream := result.getAudioStream() + if audioStream != nil { + result.AudioCodec = audioStream.CodecName + result.AudioStream = audioStream + } + + return result, nil +} + +func (a *AudioFile) getAudioStream() *FFProbeStream { + index := a.getStreamIndex("audio", a.JSON) + if index != -1 { + return &a.JSON.Streams[index] + } + return nil +} + +func (a *AudioFile) getStreamIndex(fileType string, probeJSON FFProbeJSON) int { ret := -1 for i, stream := range 
probeJSON.Streams { // skip cover art/thumbnails diff --git a/pkg/ffmpeg/stream.go b/pkg/ffmpeg/stream.go index cd043dadce..c7b21acade 100644 --- a/pkg/ffmpeg/stream.go +++ b/pkg/ffmpeg/stream.go @@ -18,6 +18,7 @@ const ( MimeMkvAudio string = "audio/x-matroska" MimeMp4Video string = "video/mp4" MimeMp4Audio string = "audio/mp4" + MimeMp3Audio string = "audio/mp3" ) type StreamManager struct { diff --git a/pkg/file/audio/caption.go b/pkg/file/audio/caption.go new file mode 100644 index 0000000000..ba542fc5e2 --- /dev/null +++ b/pkg/file/audio/caption.go @@ -0,0 +1,209 @@ +// TODO(audio): update this file +package audio + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/asticode/go-astisub" + "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/txn" + "golang.org/x/text/language" +) + +var CaptionExts = []string{"vtt", "srt"} // in a case where vtt and srt files are both provided prioritize vtt file due to native support + +// to be used for captions without a language code in the filename +// ISO 639-1 uses 2 or 3 a-z chars for codes so 00 is a safe non valid choise +// https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes +const LangUnknown = "00" + +// GetCaptionPath generates the path of a caption +// from a given file path, wanted language and caption sufffix +func GetCaptionPath(path, lang, suffix string) string { + ext := filepath.Ext(path) + fn := strings.TrimSuffix(path, ext) + captionExt := "" + if len(lang) == 0 || lang == LangUnknown { + captionExt = suffix + } else { + captionExt = lang + "." + suffix + } + return fn + "." 
+ captionExt +} + +// ReadSubs reads a captions file +func ReadSubs(path string) (*astisub.Subtitles, error) { + return astisub.OpenFile(path) +} + +// IsValidLanguage checks whether the given string is a valid +// ISO 639 language code +func IsValidLanguage(lang string) bool { + _, err := language.ParseBase(lang) + return err == nil +} + +// IsLangInCaptions returns true if lang is present +// in the captions +func IsLangInCaptions(lang string, ext string, captions []*models.VideoCaption) bool { + for _, caption := range captions { + if lang == caption.LanguageCode && ext == caption.CaptionType { + return true + } + } + return false +} + +// getCaptionPrefix returns the prefix used to search for audio files for the provided caption path +func getCaptionPrefix(captionPath string) string { + basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension + + // a caption file can be something like scene_filename.srt or scene_filename.en.srt + // if a language code is present and valid remove it from the basename + languageExt := filepath.Ext(basename) + if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) { + basename = strings.TrimSuffix(basename, languageExt) + } + + return basename + "." 
+} + +// GetCaptionsLangFromPath returns the language code from a given captions path +// If no valid language is present LangUknown is returned +func getCaptionsLangFromPath(captionPath string) string { + langCode := LangUnknown + basename := strings.TrimSuffix(captionPath, filepath.Ext(captionPath)) // caption filename without the extension + languageExt := filepath.Ext(basename) + if len(languageExt) > 2 && IsValidLanguage(languageExt[1:]) { + langCode = languageExt[1:] + } + return langCode +} + +type CaptionUpdater interface { + GetCaptions(ctx context.Context, fileID models.FileID) ([]*models.VideoCaption, error) + UpdateCaptions(ctx context.Context, fileID models.FileID, captions []*models.VideoCaption) error +} + +// MatchesCaption returns true if the caption file matches the audio file based on the filename +func MatchesCaption(audioPath, captionPath string) bool { + captionPrefix := getCaptionPrefix(captionPath) + audioPrefix := strings.TrimSuffix(audioPath, filepath.Ext(audioPath)) + "." 
+ return captionPrefix == audioPrefix +} + +// associates captions to scene/s with the same basename +// returns true if the caption file was matched to a audio file and processed, false otherwise +func AssociateCaptions(ctx context.Context, captionPath string, txnMgr txn.Manager, fqb models.FileFinder, w CaptionUpdater) bool { + captionLang := getCaptionsLangFromPath(captionPath) + + captionPrefix := getCaptionPrefix(captionPath) + matched := false + if err := txn.WithTxn(ctx, txnMgr, func(ctx context.Context) error { + var err error + files, er := fqb.FindAllByPath(ctx, captionPrefix+"*", true) + + if er != nil { + return fmt.Errorf("searching for scene %s: %w", captionPrefix, er) + } + + for _, f := range files { + // found some files + // filter out non audio files + switch f.(type) { + case *models.AudioFile: + break + default: + continue + } + + fileID := f.Base().ID + path := f.Base().Path + + logger.Debugf("Matched captions to file %s", path) + matched = true + + captions, er := w.GetCaptions(ctx, fileID) + if er != nil { + return fmt.Errorf("getting captions for file %s: %w", path, er) + } + + fileExt := filepath.Ext(captionPath) + ext := fileExt[1:] + if !IsLangInCaptions(captionLang, ext, captions) { // only update captions if language code is not present + newCaption := &models.VideoCaption{ + LanguageCode: captionLang, + Filename: filepath.Base(captionPath), + CaptionType: ext, + } + captions = append(captions, newCaption) + er = w.UpdateCaptions(ctx, fileID, captions) + if er != nil { + return fmt.Errorf("updating captions for file %s: %w", path, er) + } + + logger.Debugf("Updated captions for file %s. 
Added %s", path, captionLang) + } + } + return err + }); err != nil { + logger.Error(err.Error()) + } + + return matched +} + +// CleanCaptions removes non existent/accessible language codes from captions +func CleanCaptions(ctx context.Context, f *models.AudioFile, txnMgr txn.Manager, w CaptionUpdater) error { + captions, err := w.GetCaptions(ctx, f.ID) + if err != nil { + return fmt.Errorf("getting captions for file %s: %w", f.Path, err) + } + + if len(captions) == 0 { + return nil + } + + filePath := f.Path + + changed := false + var newCaptions []*models.VideoCaption + + for _, caption := range captions { + captionPath := caption.Path(filePath) + _, err := os.Stat(captionPath) + if errors.Is(err, os.ErrNotExist) { + logger.Infof("Removing non existent caption %s for %s", caption.Filename, f.Path) + changed = true + } else { + // other errors are ignored for the purposes of cleaning + newCaptions = append(newCaptions, caption) + } + } + + if changed { + fn := func(ctx context.Context) error { + return w.UpdateCaptions(ctx, f.ID, newCaptions) + } + + // possible that we are already in a transaction and txnMgr is nil + // in that case just call the function directly + if txnMgr == nil { + err = fn(ctx) + } else { + err = txn.WithTxn(ctx, txnMgr, fn) + } + + if err != nil { + return fmt.Errorf("updating captions for file %s: %w", f.Path, err) + } + } + + return nil +} diff --git a/pkg/file/audio/caption_test.go b/pkg/file/audio/caption_test.go new file mode 100644 index 0000000000..199e180532 --- /dev/null +++ b/pkg/file/audio/caption_test.go @@ -0,0 +1,54 @@ +// TODO(audio): update this file +package audio + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type testCase struct { + captionPath string + expectedLang string + expectedResult string +} + +var testCases = []testCase{ + { + captionPath: "/stash/video.vtt", + expectedLang: LangUnknown, + expectedResult: "/stash/video.", + }, + { + captionPath: "/stash/video.en.vtt", + expectedLang: 
"en", + expectedResult: "/stash/video.", // lang code valid, remove en part + }, + { + captionPath: "/stash/video.test.srt", + expectedLang: LangUnknown, + expectedResult: "/stash/video.test.", // no lang code/lang code invalid test should remain + }, + { + captionPath: "C:\\videos\\video.fr.srt", + expectedLang: "fr", + expectedResult: "C:\\videos\\video.", + }, + { + captionPath: "C:\\videos\\video.xx.srt", + expectedLang: LangUnknown, + expectedResult: "C:\\videos\\video.xx.", // no lang code/lang code invalid xx should remain + }, +} + +func TestGenerateCaptionCandidates(t *testing.T) { + for _, c := range testCases { + assert.Equal(t, c.expectedResult, getCaptionPrefix(c.captionPath)) + } +} + +func TestGetCaptionsLangFromPath(t *testing.T) { + for _, l := range testCases { + assert.Equal(t, l.expectedLang, getCaptionsLangFromPath(l.captionPath)) + } +} diff --git a/pkg/file/audio/funscript.go b/pkg/file/audio/funscript.go new file mode 100644 index 0000000000..30d2447e36 --- /dev/null +++ b/pkg/file/audio/funscript.go @@ -0,0 +1,15 @@ +// TODO(audio): update this file +package audio + +import ( + "path/filepath" + "strings" +) + +// GetFunscriptPath returns the path of a file +// with the extension changed to .funscript +func GetFunscriptPath(path string) string { + ext := filepath.Ext(path) + fn := strings.TrimSuffix(path, ext) + return fn + ".funscript" +} diff --git a/pkg/file/audio/scan.go b/pkg/file/audio/scan.go new file mode 100644 index 0000000000..aa39fd3178 --- /dev/null +++ b/pkg/file/audio/scan.go @@ -0,0 +1,65 @@ +// TODO(audio): update this file +package audio + +import ( + "context" + "errors" + "fmt" + + "github.com/stashapp/stash/pkg/ffmpeg" + "github.com/stashapp/stash/pkg/file" + "github.com/stashapp/stash/pkg/models" +) + +// Decorator adds audio specific fields to a File. 
+type Decorator struct { + FFProbe *ffmpeg.FFProbe +} + +func (d *Decorator) Decorate(ctx context.Context, fs models.FS, f models.File) (models.File, error) { + if d.FFProbe == nil { + return f, errors.New("ffprobe not configured") + } + + base := f.Base() + // TODO - copy to temp file if not an OsFS + if _, isOs := fs.(*file.OsFS); !isOs { + return f, fmt.Errorf("audio.constructFile: only OsFS is supported") + } + + probe := d.FFProbe + audioFile, err := probe.NewAudioFile(base.Path) + if err != nil { + return f, fmt.Errorf("running ffprobe on %q: %w", base.Path, err) + } + + container, err := ffmpeg.MatchContainer(audioFile.Container, base.Path) + if err != nil { + return f, fmt.Errorf("matching container for %q: %w", base.Path, err) + } + + return &models.AudioFile{ + BaseFile: base, + Format: string(container), + AudioCodec: audioFile.AudioCodec, + Duration: audioFile.FileDuration, + SampleRate: audioFile.SampleRate, + BitRate: audioFile.Bitrate, + }, nil +} + +func (d *Decorator) IsMissingMetadata(ctx context.Context, fs models.FS, f models.File) bool { + const ( + unsetString = "unset" + unsetNumber = -1 + ) + + vf, ok := f.(*models.AudioFile) + if !ok { + return true + } + + return vf.AudioCodec == unsetString || + vf.Format == unsetString || vf.SampleRate == unsetNumber || + vf.Duration == unsetNumber || vf.BitRate == unsetNumber +} diff --git a/pkg/models/audio.go b/pkg/models/audio.go new file mode 100644 index 0000000000..2f57561a81 --- /dev/null +++ b/pkg/models/audio.go @@ -0,0 +1,204 @@ +// TODO(audio): update this file + +package models + +import "context" + +type AudioFilterType struct { + OperatorFilter[AudioFilterType] + ID *IntCriterionInput `json:"id"` + Title *StringCriterionInput `json:"title"` + Code *StringCriterionInput `json:"code"` + Details *StringCriterionInput `json:"details"` + // Filter by file oshash + Oshash *StringCriterionInput `json:"oshash"` + // Filter by file checksum + Checksum *StringCriterionInput `json:"checksum"` + // 
Filter by file phash + Phash *StringCriterionInput `json:"phash"` + // Filter by phash distance + PhashDistance *PhashDistanceCriterionInput `json:"phash_distance"` + // Filter by path + Path *StringCriterionInput `json:"path"` + // Filter by file count + FileCount *IntCriterionInput `json:"file_count"` + // Filter by rating expressed as 1-100 + Rating100 *IntCriterionInput `json:"rating100"` + // Filter by organized + Organized *bool `json:"organized"` + // Filter by o-counter + OCounter *IntCriterionInput `json:"o_counter"` + // Filter Audios by duplication criteria + Duplicated *DuplicationCriterionInput `json:"duplicated"` + // Filter by resolution + Resolution *ResolutionCriterionInput `json:"resolution"` + // Filter by orientation + Orientation *OrientationCriterionInput `json:"orientation"` + // Filter by samplerate + Samplerate *IntCriterionInput `json:"samplerate"` + // Filter by bitrate + Bitrate *IntCriterionInput `json:"bitrate"` + // Filter by audio codec + AudioCodec *StringCriterionInput `json:"audio_codec"` + // Filter by duration (in seconds) + Duration *IntCriterionInput `json:"duration"` + // Filter to only include audios which have markers. 
`true` or `false` + HasMarkers *string `json:"has_markers"` + // Filter to only include audios missing this property + IsMissing *string `json:"is_missing"` + // Filter to only include audios with this studio + Studios *HierarchicalMultiCriterionInput `json:"studios"` + // Filter to only include audios with this group + Groups *HierarchicalMultiCriterionInput `json:"groups"` + // Filter to only include audios with this movie + Movies *MultiCriterionInput `json:"movies"` + // Filter to only include audios with this gallery + Galleries *MultiCriterionInput `json:"galleries"` + // Filter to only include audios with these tags + Tags *HierarchicalMultiCriterionInput `json:"tags"` + // Filter by tag count + TagCount *IntCriterionInput `json:"tag_count"` + // Filter to only include audios with performers with these tags + PerformerTags *HierarchicalMultiCriterionInput `json:"performer_tags"` + // Filter audios that have performers that have been favorited + PerformerFavorite *bool `json:"performer_favorite"` + // Filter audios by performer age at time of audio + PerformerAge *IntCriterionInput `json:"performer_age"` + // Filter to only include audios with these performers + Performers *MultiCriterionInput `json:"performers"` + // Filter by performer count + PerformerCount *IntCriterionInput `json:"performer_count"` + // Filter by url + URL *StringCriterionInput `json:"url"` + // Filter by captions + Captions *StringCriterionInput `json:"captions"` + // Filter by resume time + ResumeTime *IntCriterionInput `json:"resume_time"` + // Filter by play count + PlayCount *IntCriterionInput `json:"play_count"` + // Filter by play duration (in seconds) + PlayDuration *IntCriterionInput `json:"play_duration"` + // Filter by last played at + LastPlayedAt *TimestampCriterionInput `json:"last_played_at"` + // Filter by date + Date *DateCriterionInput `json:"date"` + // Filter by related galleries that meet this criteria + GalleriesFilter *GalleryFilterType `json:"galleries_filter"` + 
// Filter by related performers that meet this criteria + PerformersFilter *PerformerFilterType `json:"performers_filter"` + // Filter by related studios that meet this criteria + StudiosFilter *StudioFilterType `json:"studios_filter"` + // Filter by related tags that meet this criteria + TagsFilter *TagFilterType `json:"tags_filter"` + // Filter by related groups that meet this criteria + GroupsFilter *GroupFilterType `json:"groups_filter"` + // Filter by related files that meet this criteria + FilesFilter *FileFilterType `json:"files_filter"` + // Filter by created at + CreatedAt *TimestampCriterionInput `json:"created_at"` + // Filter by updated at + UpdatedAt *TimestampCriterionInput `json:"updated_at"` + + // Filter by custom fields + CustomFields []CustomFieldCriterionInput `json:"custom_fields"` +} + +type AudioQueryOptions struct { + QueryOptions + AudioFilter *AudioFilterType + + TotalDuration bool + TotalSize bool +} + +type AudioQueryResult struct { + QueryResult[int] + TotalDuration float64 + TotalSize float64 + + getter AudioGetter + audios []*Audio + resolveErr error +} + +type AudioGroupInput struct { + GroupID string `json:"group_id"` + AudioIndex *int `json:"audio_index"` +} + +type AudioCreateInput struct { + Title *string `json:"title"` + Code *string `json:"code"` + Details *string `json:"details"` + URL *string `json:"url"` + Urls []string `json:"urls"` + Date *string `json:"date"` + Rating100 *int `json:"rating100"` + Organized *bool `json:"organized"` + StudioID *string `json:"studio_id"` + GalleryIds []string `json:"gallery_ids"` + PerformerIds []string `json:"performer_ids"` + Groups []AudioGroupInput `json:"groups"` + TagIds []string `json:"tag_ids"` + // This should be a URL or a base64 encoded data URL + CoverImage *string `json:"cover_image"` + // The first id will be assigned as primary. + // Files will be reassigned from existing audios if applicable. + // Files must not already be primary for another audio. 
+ FileIds []string `json:"file_ids"` + CustomFields map[string]any `json:"custom_fields,omitempty"` +} + +type AudioUpdateInput struct { + ClientMutationID *string `json:"clientMutationId"` + ID string `json:"id"` + Title *string `json:"title"` + Code *string `json:"code"` + Details *string `json:"details"` + URL *string `json:"url"` + Urls []string `json:"urls"` + Date *string `json:"date"` + Rating100 *int `json:"rating100"` + OCounter *int `json:"o_counter"` + Organized *bool `json:"organized"` + StudioID *string `json:"studio_id"` + GalleryIds []string `json:"gallery_ids"` + PerformerIds []string `json:"performer_ids"` + Groups []AudioGroupInput `json:"groups"` + TagIds []string `json:"tag_ids"` + // This should be a URL or a base64 encoded data URL + CoverImage *string `json:"cover_image"` + ResumeTime *float64 `json:"resume_time"` + PlayDuration *float64 `json:"play_duration"` + PlayCount *int `json:"play_count"` + PrimaryFileID *string `json:"primary_file_id"` + CustomFields *CustomFieldsInput +} + +type AudioDestroyInput struct { + ID string `json:"id"` + DeleteFile *bool `json:"delete_file"` + DeleteGenerated *bool `json:"delete_generated"` + DestroyFileEntry *bool `json:"destroy_file_entry"` +} + +type AudiosDestroyInput struct { + Ids []string `json:"ids"` + DeleteFile *bool `json:"delete_file"` + DeleteGenerated *bool `json:"delete_generated"` + DestroyFileEntry *bool `json:"destroy_file_entry"` +} + +func NewAudioQueryResult(getter AudioGetter) *AudioQueryResult { + return &AudioQueryResult{ + getter: getter, + } +} + +func (r *AudioQueryResult) Resolve(ctx context.Context) ([]*Audio, error) { + // cache results + if r.audios == nil && r.resolveErr == nil { + r.audios, r.resolveErr = r.getter.FindMany(ctx, r.IDs) + } + return r.audios, r.resolveErr +} diff --git a/pkg/models/file.go b/pkg/models/file.go index 32263319c7..14f1669a4e 100644 --- a/pkg/models/file.go +++ b/pkg/models/file.go @@ -33,6 +33,7 @@ type FileFilterType struct { SceneCount 
*IntCriterionInput `json:"scene_count"` ImageCount *IntCriterionInput `json:"image_count"` GalleryCount *IntCriterionInput `json:"gallery_count"` + AudioFilter *AudioFilterType `json:"audio_filter"` ScenesFilter *SceneFilterType `json:"scenes_filter"` ImagesFilter *ImageFilterType `json:"images_filter"` GalleriesFilter *GalleryFilterType `json:"galleries_filter"` diff --git a/pkg/models/filename_parser.go b/pkg/models/filename_parser.go index 584ae72cbf..00aca11027 100644 --- a/pkg/models/filename_parser.go +++ b/pkg/models/filename_parser.go @@ -28,3 +28,32 @@ type SceneMovieID struct { MovieID string `json:"movie_id"` SceneIndex *string `json:"scene_index"` } + +// Audio + +type AudioParserInput struct { + IgnoreWords []string `json:"ignoreWords"` + WhitespaceCharacters *string `json:"whitespaceCharacters"` + CapitalizeTitle *bool `json:"capitalizeTitle"` + IgnoreOrganized *bool `json:"ignoreOrganized"` +} + +type AudioParserResult struct { + Audio *Audio `json:"scene"` + Title *string `json:"title"` + Code *string `json:"code"` + Details *string `json:"details"` + URL *string `json:"url"` + Date *string `json:"date"` + Rating *int `json:"rating"` + Rating100 *int `json:"rating100"` + StudioID *string `json:"studio_id"` + PerformerIds []string `json:"performer_ids"` + Groups []*AudioGroupID `json:"groups"` + TagIds []string `json:"tag_ids"` +} + +type AudioGroupID struct { + GroupID string `json:"group_id"` + AudioIndex *string `json:"scene_index"` +} diff --git a/pkg/models/jsonschema/audio.go b/pkg/models/jsonschema/audio.go new file mode 100644 index 0000000000..e7aee342ee --- /dev/null +++ b/pkg/models/jsonschema/audio.go @@ -0,0 +1,110 @@ +package jsonschema + +import ( + "fmt" + "os" + "strconv" + + jsoniter "github.com/json-iterator/go" + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/models/json" +) + +type AudioFile struct { + ModTime json.JSONTime `json:"mod_time,omitempty"` + Size string `json:"size"` + Duration string 
`json:"duration"` + AudioCodec string `json:"audio_codec"` + Format string `json:"format"` + Width int `json:"width"` + Height int `json:"height"` + Samplerate string `json:"samplerate"` + Bitrate int `json:"bitrate"` +} + +type AudioGroup struct { + GroupName string `json:"movieName,omitempty"` + AudioIndex int `json:"audio_index,omitempty"` +} + +type Audio struct { + Title string `json:"title,omitempty"` + Code string `json:"code,omitempty"` + Studio string `json:"studio,omitempty"` + + // deprecated - for import only + URL string `json:"url,omitempty"` + + URLs []string `json:"urls,omitempty"` + Date string `json:"date,omitempty"` + Rating int `json:"rating,omitempty"` + Organized bool `json:"organized,omitempty"` + + // deprecated - for import only + OCounter int `json:"o_counter,omitempty"` + + Details string `json:"details,omitempty"` + Galleries []GalleryRef `json:"galleries,omitempty"` + Performers []string `json:"performers,omitempty"` + Groups []AudioGroup `json:"movies,omitempty"` + Tags []string `json:"tags,omitempty"` + // Markers []AudioMarker `json:"markers,omitempty"` + Files []string `json:"files,omitempty"` + Cover string `json:"cover,omitempty"` + CreatedAt json.JSONTime `json:"created_at,omitempty"` + UpdatedAt json.JSONTime `json:"updated_at,omitempty"` + + // deprecated - for import only + LastPlayedAt json.JSONTime `json:"last_played_at,omitempty"` + + ResumeTime float64 `json:"resume_time,omitempty"` + + // deprecated - for import only + PlayCount int `json:"play_count,omitempty"` + + PlayHistory []json.JSONTime `json:"play_history,omitempty"` + OHistory []json.JSONTime `json:"o_history,omitempty"` + + PlayDuration float64 `json:"play_duration,omitempty"` + + CustomFields map[string]interface{} `json:"custom_fields,omitempty"` +} + +func (s Audio) Filename(id int, basename string, hash string) string { + ret := fsutil.SanitiseBasename(s.Title) + if ret == "" { + ret = basename + } + + if hash != "" { + ret += "." 
+ hash + } else { + // audios may have no file and therefore no hash + ret += "." + strconv.Itoa(id) + } + + return ret + ".json" +} + +func LoadAudioFile(filePath string) (*Audio, error) { + var audio Audio + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer file.Close() + var json = jsoniter.ConfigCompatibleWithStandardLibrary + jsonParser := json.NewDecoder(file) + err = jsonParser.Decode(&audio) + if err != nil { + return nil, err + } + return &audio, nil +} + +func SaveAudioFile(filePath string, audio *Audio) error { + if audio == nil { + return fmt.Errorf("audio must not be nil") + } + return marshalToFile(filePath, audio) +} diff --git a/pkg/models/mocks/AudioReaderWriter.go b/pkg/models/mocks/AudioReaderWriter.go new file mode 100644 index 0000000000..d6a1d81a8e --- /dev/null +++ b/pkg/models/mocks/AudioReaderWriter.go @@ -0,0 +1,1465 @@ +// Code generated by mockery v2.10.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + models "github.com/stashapp/stash/pkg/models" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// AudioReaderWriter is an autogenerated mock type for the AudioReaderWriter type +type AudioReaderWriter struct { + mock.Mock +} + +// AddFileID provides a mock function with given fields: ctx, id, fileID +func (_m *AudioReaderWriter) AddFileID(ctx context.Context, id int, fileID models.FileID) error { + ret := _m.Called(ctx, id, fileID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, models.FileID) error); ok { + r0 = rf(ctx, id, fileID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddGalleryIDs provides a mock function with given fields: ctx, audioID, galleryIDs +func (_m *AudioReaderWriter) AddGalleryIDs(ctx context.Context, audioID int, galleryIDs []int) error { + ret := _m.Called(ctx, audioID, galleryIDs) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, []int) error); ok { + r0 = rf(ctx, audioID, galleryIDs) + } else { + 
r0 = ret.Error(0) + } + + return r0 +} + +// AddO provides a mock function with given fields: ctx, id, dates +func (_m *AudioReaderWriter) AddO(ctx context.Context, id int, dates []time.Time) ([]time.Time, error) { + ret := _m.Called(ctx, id, dates) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int, []time.Time) []time.Time); ok { + r0 = rf(ctx, id, dates) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, []time.Time) error); ok { + r1 = rf(ctx, id, dates) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddViews provides a mock function with given fields: ctx, audioID, dates +func (_m *AudioReaderWriter) AddViews(ctx context.Context, audioID int, dates []time.Time) ([]time.Time, error) { + ret := _m.Called(ctx, audioID, dates) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int, []time.Time) []time.Time); ok { + r0 = rf(ctx, audioID, dates) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, []time.Time) error); ok { + r1 = rf(ctx, audioID, dates) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// All provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) All(ctx context.Context) ([]*models.Audio, error) { + ret := _m.Called(ctx) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context) []*models.Audio); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AssignFiles provides a mock function with given fields: ctx, audioID, fileID +func (_m *AudioReaderWriter) AssignFiles(ctx context.Context, audioID int, fileID []models.FileID) error { + ret := _m.Called(ctx, 
audioID, fileID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, []models.FileID) error); ok { + r0 = rf(ctx, audioID, fileID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Count provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) Count(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountAllViews provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) CountAllViews(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountByFileID provides a mock function with given fields: ctx, fileID +func (_m *AudioReaderWriter) CountByFileID(ctx context.Context, fileID models.FileID) (int, error) { + ret := _m.Called(ctx, fileID) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, models.FileID) int); ok { + r0 = rf(ctx, fileID) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, models.FileID) error); ok { + r1 = rf(ctx, fileID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *AudioReaderWriter) CountByPerformerID(ctx context.Context, performerID int) (int, error) { + ret := _m.Called(ctx, performerID) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, performerID) + } else { + r0 = ret.Get(0).(int) + } + + 
var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountMissingChecksum provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) CountMissingChecksum(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountMissingOSHash provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) CountMissingOSHash(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountUniqueViews provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) CountUniqueViews(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountViews provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) CountViews(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// Create provides a mock function with given fields: ctx, newAudio, fileIDs +func (_m *AudioReaderWriter) Create(ctx context.Context, newAudio *models.Audio, fileIDs []models.FileID) error { + ret := _m.Called(ctx, newAudio, fileIDs) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Audio, []models.FileID) error); ok { + r0 = rf(ctx, newAudio, fileIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteAllViews provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) DeleteAllViews(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteO provides a mock function with given fields: ctx, id, dates +func (_m *AudioReaderWriter) DeleteO(ctx context.Context, id int, dates []time.Time) ([]time.Time, error) { + ret := _m.Called(ctx, id, dates) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int, []time.Time) []time.Time); ok { + r0 = rf(ctx, id, dates) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, []time.Time) error); ok { + r1 = rf(ctx, id, dates) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteViews provides a mock function with given fields: ctx, id, dates +func (_m *AudioReaderWriter) DeleteViews(ctx context.Context, id int, dates []time.Time) ([]time.Time, error) { + ret := _m.Called(ctx, id, dates) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int, []time.Time) []time.Time); ok { + r0 = rf(ctx, id, dates) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } 
+ } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, []time.Time) error); ok { + r1 = rf(ctx, id, dates) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Destroy provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) Destroy(ctx context.Context, id int) error { + ret := _m.Called(ctx, id) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Duration provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) Duration(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) + + var r0 float64 + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Find provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) Find(ctx context.Context, id int) (*models.Audio, error) { + ret := _m.Called(ctx, id) + + var r0 *models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int) *models.Audio); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByChecksum provides a mock function with given fields: ctx, checksum +func (_m *AudioReaderWriter) FindByChecksum(ctx context.Context, checksum string) ([]*models.Audio, error) { + ret := _m.Called(ctx, checksum) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Audio); ok { + r0 = rf(ctx, checksum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, checksum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByFileID provides a mock function with given fields: ctx, fileID +func (_m *AudioReaderWriter) FindByFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) { + ret := _m.Called(ctx, fileID) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, models.FileID) []*models.Audio); ok { + r0 = rf(ctx, fileID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, models.FileID) error); ok { + r1 = rf(ctx, fileID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByFingerprints provides a mock function with given fields: ctx, fp +func (_m *AudioReaderWriter) FindByFingerprints(ctx context.Context, fp []models.Fingerprint) ([]*models.Audio, error) { + ret := _m.Called(ctx, fp) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, []models.Fingerprint) []*models.Audio); ok { + r0 = rf(ctx, fp) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []models.Fingerprint) error); ok { + r1 = rf(ctx, fp) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByGalleryID provides a mock function with given fields: ctx, galleryID +func (_m *AudioReaderWriter) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Audio, error) { + ret := _m.Called(ctx, galleryID) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Audio); ok { + r0 = rf(ctx, galleryID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, galleryID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByGroupID
provides a mock function with given fields: ctx, groupID +func (_m *AudioReaderWriter) FindByGroupID(ctx context.Context, groupID int) ([]*models.Audio, error) { + ret := _m.Called(ctx, groupID) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Audio); ok { + r0 = rf(ctx, groupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, groupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByIDs provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) FindByIDs(ctx context.Context, ids []int) ([]*models.Audio, error) { + ret := _m.Called(ctx, ids) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Audio); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByOSHash provides a mock function with given fields: ctx, oshash +func (_m *AudioReaderWriter) FindByOSHash(ctx context.Context, oshash string) ([]*models.Audio, error) { + ret := _m.Called(ctx, oshash) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, string) []*models.Audio); ok { + r0 = rf(ctx, oshash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, oshash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByPath provides a mock function with given fields: ctx, path +func (_m *AudioReaderWriter) FindByPath(ctx context.Context, path string) ([]*models.Audio, error) { + ret := _m.Called(ctx, path) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, 
string) []*models.Audio); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *AudioReaderWriter) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Audio, error) { + ret := _m.Called(ctx, performerID) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.Audio); ok { + r0 = rf(ctx, performerID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindByPrimaryFileID provides a mock function with given fields: ctx, fileID +func (_m *AudioReaderWriter) FindByPrimaryFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) { + ret := _m.Called(ctx, fileID) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, models.FileID) []*models.Audio); ok { + r0 = rf(ctx, fileID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, models.FileID) error); ok { + r1 = rf(ctx, fileID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindDuplicates provides a mock function with given fields: ctx, distance, durationDiff +func (_m *AudioReaderWriter) FindDuplicates(ctx context.Context, distance int, durationDiff float64) ([][]*models.Audio, error) { + ret := _m.Called(ctx, distance, durationDiff) + + var r0 [][]*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int, float64) [][]*models.Audio); ok { + r0 = rf(ctx, distance, durationDiff) + } else { + if ret.Get(0) != nil { 
+ r0 = ret.Get(0).([][]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, float64) error); ok { + r1 = rf(ctx, distance, durationDiff) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindMany provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) FindMany(ctx context.Context, ids []int) ([]*models.Audio, error) { + ret := _m.Called(ctx, ids) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, []int) []*models.Audio); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAllOCount provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) GetAllOCount(ctx context.Context) (int, error) { + ret := _m.Called(ctx) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCover provides a mock function with given fields: ctx, audioID +func (_m *AudioReaderWriter) GetCover(ctx context.Context, audioID int) ([]byte, error) { + ret := _m.Called(ctx, audioID) + + var r0 []byte + if rf, ok := ret.Get(0).(func(context.Context, int) []byte); ok { + r0 = rf(ctx, audioID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, audioID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCustomFields provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) GetCustomFields(ctx context.Context, id int) (map[string]interface{}, error) { + ret := _m.Called(ctx, id) + 
+ var r0 map[string]interface{} + if rf, ok := ret.Get(0).(func(context.Context, int) map[string]interface{}); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]interface{}) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCustomFieldsBulk provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetCustomFieldsBulk(ctx context.Context, ids []int) ([]models.CustomFieldMap, error) { + ret := _m.Called(ctx, ids) + + var r0 []models.CustomFieldMap + if rf, ok := ret.Get(0).(func(context.Context, []int) []models.CustomFieldMap); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.CustomFieldMap) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFiles provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetFiles(ctx context.Context, relatedID int) ([]*models.AudioFile, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []*models.AudioFile + if rf, ok := ret.Get(0).(func(context.Context, int) []*models.AudioFile); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.AudioFile) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGalleryIDs provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetGalleryIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var 
r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGroups provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) GetGroups(ctx context.Context, id int) ([]models.GroupsAudios, error) { + ret := _m.Called(ctx, id) + + var r0 []models.GroupsAudios + if rf, ok := ret.Get(0).(func(context.Context, int) []models.GroupsAudios); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]models.GroupsAudios) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyFileIDs provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetManyFileIDs(ctx context.Context, ids []int) ([][]models.FileID, error) { + ret := _m.Called(ctx, ids) + + var r0 [][]models.FileID + if rf, ok := ret.Get(0).(func(context.Context, []int) [][]models.FileID); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([][]models.FileID) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyLastViewed provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetManyLastViewed(ctx context.Context, ids []int) ([]*time.Time, error) { + ret := _m.Called(ctx, ids) + + var r0 []*time.Time + if rf, ok := ret.Get(0).(func(context.Context, []int) []*time.Time); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyOCount provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) 
GetManyOCount(ctx context.Context, ids []int) ([]int, error) { + ret := _m.Called(ctx, ids) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, []int) []int); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyODates provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetManyODates(ctx context.Context, ids []int) ([][]time.Time, error) { + ret := _m.Called(ctx, ids) + + var r0 [][]time.Time + if rf, ok := ret.Get(0).(func(context.Context, []int) [][]time.Time); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([][]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyViewCount provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetManyViewCount(ctx context.Context, ids []int) ([]int, error) { + ret := _m.Called(ctx, ids) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, []int) []int); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManyViewDates provides a mock function with given fields: ctx, ids +func (_m *AudioReaderWriter) GetManyViewDates(ctx context.Context, ids []int) ([][]time.Time, error) { + ret := _m.Called(ctx, ids) + + var r0 [][]time.Time + if rf, ok := ret.Get(0).(func(context.Context, []int) [][]time.Time); ok { + r0 = rf(ctx, ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([][]time.Time) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, []int) error); ok { + r1 = rf(ctx, ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOCount provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) GetOCount(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetODates provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetODates(ctx context.Context, relatedID int) ([]time.Time, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int) []time.Time); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPerformerIDs provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetPerformerIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTagIDs provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetTagIDs(ctx context.Context, relatedID int) ([]int, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []int + if rf, ok := 
ret.Get(0).(func(context.Context, int) []int); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetURLs provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetURLs(ctx context.Context, relatedID int) ([]string, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []string + if rf, ok := ret.Get(0).(func(context.Context, int) []string); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetViewDates provides a mock function with given fields: ctx, relatedID +func (_m *AudioReaderWriter) GetViewDates(ctx context.Context, relatedID int) ([]time.Time, error) { + ret := _m.Called(ctx, relatedID) + + var r0 []time.Time + if rf, ok := ret.Get(0).(func(context.Context, int) []time.Time); ok { + r0 = rf(ctx, relatedID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]time.Time) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, relatedID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasCover provides a mock function with given fields: ctx, audioID +func (_m *AudioReaderWriter) HasCover(ctx context.Context, audioID int) (bool, error) { + ret := _m.Called(ctx, audioID) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context, int) bool); ok { + r0 = rf(ctx, audioID) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, audioID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OCountByGroupID provides a mock 
function with given fields: ctx, groupID +func (_m *AudioReaderWriter) OCountByGroupID(ctx context.Context, groupID int) (int, error) { + ret := _m.Called(ctx, groupID) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, groupID) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, groupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OCountByPerformerID provides a mock function with given fields: ctx, performerID +func (_m *AudioReaderWriter) OCountByPerformerID(ctx context.Context, performerID int) (int, error) { + ret := _m.Called(ctx, performerID) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, performerID) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, performerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OCountByStudioID provides a mock function with given fields: ctx, studioID +func (_m *AudioReaderWriter) OCountByStudioID(ctx context.Context, studioID int) (int, error) { + ret := _m.Called(ctx, studioID) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, studioID) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, studioID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PlayDuration provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) PlayDuration(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) + + var r0 float64 + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, 
r1 +} + +// Query provides a mock function with given fields: ctx, options +func (_m *AudioReaderWriter) Query(ctx context.Context, options models.AudioQueryOptions) (*models.AudioQueryResult, error) { + ret := _m.Called(ctx, options) + + var r0 *models.AudioQueryResult + if rf, ok := ret.Get(0).(func(context.Context, models.AudioQueryOptions) *models.AudioQueryResult); ok { + r0 = rf(ctx, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.AudioQueryResult) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, models.AudioQueryOptions) error); ok { + r1 = rf(ctx, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryCount provides a mock function with given fields: ctx, audioFilter, findFilter +func (_m *AudioReaderWriter) QueryCount(ctx context.Context, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType) (int, error) { + ret := _m.Called(ctx, audioFilter, findFilter) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, *models.AudioFilterType, *models.FindFilterType) int); ok { + r0 = rf(ctx, audioFilter, findFilter) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *models.AudioFilterType, *models.FindFilterType) error); ok { + r1 = rf(ctx, audioFilter, findFilter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResetActivity provides a mock function with given fields: ctx, audioID, resetResume, resetDuration +func (_m *AudioReaderWriter) ResetActivity(ctx context.Context, audioID int, resetResume bool, resetDuration bool) (bool, error) { + ret := _m.Called(ctx, audioID, resetResume, resetDuration) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context, int, bool, bool) bool); ok { + r0 = rf(ctx, audioID, resetResume, resetDuration) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, bool, bool) error); ok { + r1 = rf(ctx, audioID, 
resetResume, resetDuration) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResetO provides a mock function with given fields: ctx, id +func (_m *AudioReaderWriter) ResetO(ctx context.Context, id int) (int, error) { + ret := _m.Called(ctx, id) + + var r0 int + if rf, ok := ret.Get(0).(func(context.Context, int) int); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveActivity provides a mock function with given fields: ctx, audioID, resumeTime, playDuration +func (_m *AudioReaderWriter) SaveActivity(ctx context.Context, audioID int, resumeTime *float64, playDuration *float64) (bool, error) { + ret := _m.Called(ctx, audioID, resumeTime, playDuration) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context, int, *float64, *float64) bool); ok { + r0 = rf(ctx, audioID, resumeTime, playDuration) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, *float64, *float64) error); ok { + r1 = rf(ctx, audioID, resumeTime, playDuration) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetCustomFields provides a mock function with given fields: ctx, id, fields +func (_m *AudioReaderWriter) SetCustomFields(ctx context.Context, id int, fields models.CustomFieldsInput) error { + ret := _m.Called(ctx, id, fields) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, models.CustomFieldsInput) error); ok { + r0 = rf(ctx, id, fields) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Size provides a mock function with given fields: ctx +func (_m *AudioReaderWriter) Size(ctx context.Context) (float64, error) { + ret := _m.Called(ctx) + + var r0 float64 + if rf, ok := ret.Get(0).(func(context.Context) float64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if 
rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Update provides a mock function with given fields: ctx, updatedAudio +func (_m *AudioReaderWriter) Update(ctx context.Context, updatedAudio *models.Audio) error { + ret := _m.Called(ctx, updatedAudio) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *models.Audio) error); ok { + r0 = rf(ctx, updatedAudio) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateCover provides a mock function with given fields: ctx, audioID, cover +func (_m *AudioReaderWriter) UpdateCover(ctx context.Context, audioID int, cover []byte) error { + ret := _m.Called(ctx, audioID, cover) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok { + r0 = rf(ctx, audioID, cover) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdatePartial provides a mock function with given fields: ctx, id, updatedAudio +func (_m *AudioReaderWriter) UpdatePartial(ctx context.Context, id int, updatedAudio models.AudioPartial) (*models.Audio, error) { + ret := _m.Called(ctx, id, updatedAudio) + + var r0 *models.Audio + if rf, ok := ret.Get(0).(func(context.Context, int, models.AudioPartial) *models.Audio); ok { + r0 = rf(ctx, id, updatedAudio) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.Audio) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, models.AudioPartial) error); ok { + r1 = rf(ctx, id, updatedAudio) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Wall provides a mock function with given fields: ctx, q +func (_m *AudioReaderWriter) Wall(ctx context.Context, q *string) ([]*models.Audio, error) { + ret := _m.Called(ctx, q) + + var r0 []*models.Audio + if rf, ok := ret.Get(0).(func(context.Context, *string) []*models.Audio); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*models.Audio) + } + } + + var r1 error + 
if rf, ok := ret.Get(1).(func(context.Context, *string) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/pkg/models/mocks/database.go b/pkg/models/mocks/database.go index 88f106e192..58c219909e 100644 --- a/pkg/models/mocks/database.go +++ b/pkg/models/mocks/database.go @@ -18,6 +18,7 @@ type Database struct { Image *ImageReaderWriter Group *GroupReaderWriter Performer *PerformerReaderWriter + Audio *AudioReaderWriter Scene *SceneReaderWriter SceneMarker *SceneMarkerReaderWriter Studio *StudioReaderWriter @@ -67,6 +68,7 @@ func NewDatabase() *Database { Image: &ImageReaderWriter{}, Group: &GroupReaderWriter{}, Performer: &PerformerReaderWriter{}, + Audio: &AudioReaderWriter{}, Scene: &SceneReaderWriter{}, SceneMarker: &SceneMarkerReaderWriter{}, Studio: &StudioReaderWriter{}, @@ -83,6 +85,7 @@ func (db *Database) AssertExpectations(t mock.TestingT) { db.Image.AssertExpectations(t) db.Group.AssertExpectations(t) db.Performer.AssertExpectations(t) + db.Audio.AssertExpectations(t) db.Scene.AssertExpectations(t) db.SceneMarker.AssertExpectations(t) db.Studio.AssertExpectations(t) @@ -110,6 +113,7 @@ func (db *Database) Repository() models.Repository { Image: db.Image, Group: db.Group, Performer: db.Performer, + Audio: db.Audio, Scene: db.Scene, SceneMarker: db.SceneMarker, Studio: db.Studio, diff --git a/pkg/models/mocks/query.go b/pkg/models/mocks/query.go index abde51e65c..e6a70dd5f8 100644 --- a/pkg/models/mocks/query.go +++ b/pkg/models/mocks/query.go @@ -31,6 +31,31 @@ func SceneQueryResult(scenes []*models.Scene, count int) *models.SceneQueryResul return ret } +type audioResolver struct { + audios []*models.Audio +} + +func (s *audioResolver) Find(ctx context.Context, id int) (*models.Audio, error) { + panic("not implemented") +} + +func (s *audioResolver) FindMany(ctx context.Context, ids []int) ([]*models.Audio, error) { + return s.audios, nil +} + +func (s *audioResolver) FindByIDs(ctx context.Context, ids 
[]int) ([]*models.Audio, error) { + return s.audios, nil +} + +func AudioQueryResult(audios []*models.Audio, count int) *models.AudioQueryResult { + ret := models.NewAudioQueryResult(&audioResolver{ + audios: audios, + }) + + ret.Count = count + return ret +} + type imageResolver struct { images []*models.Image } diff --git a/pkg/models/model_audio.go b/pkg/models/model_audio.go new file mode 100644 index 0000000000..ba42b1a1c8 --- /dev/null +++ b/pkg/models/model_audio.go @@ -0,0 +1,267 @@ +// TODO(audio): update this file + +package models + +import ( + "context" + "errors" + "path/filepath" + "strconv" + "time" +) + +// Audio stores the metadata for a single video audio. +type Audio struct { + ID int `json:"id"` + Title string `json:"title"` + Code string `json:"code"` + Details string `json:"details"` + Artists string `json:"artists"` + Date *Date `json:"date"` + // Rating expressed in 1-100 scale + Rating *int `json:"rating"` + Organized bool `json:"organized"` + StudioID *int `json:"studio_id"` + + // transient - not persisted + Files RelatedAudioFiles + PrimaryFileID *FileID + // transient - path of primary file - empty if no files + Path string + // transient - oshash of primary file - empty if no files + OSHash string + // transient - checksum of primary file - empty if no files + Checksum string + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + + ResumeTime float64 `json:"resume_time"` + PlayDuration float64 `json:"play_duration"` + + URLs RelatedStrings `json:"urls"` + GalleryIDs RelatedIDs `json:"gallery_ids"` + TagIDs RelatedIDs `json:"tag_ids"` + PerformerIDs RelatedIDs `json:"performer_ids"` + Groups RelatedGroupsAudio `json:"groups"` +} + +func NewAudio() Audio { + currentTime := time.Now() + return Audio{ + CreatedAt: currentTime, + UpdatedAt: currentTime, + } +} + +type CreateAudioInput struct { + *Audio + + FileIDs []FileID + CoverImage []byte + CustomFields CustomFieldMap `json:"custom_fields"` +} + +type 
UpdateAudioInput struct { + *Audio + + CustomFields CustomFieldsInput `json:"custom_fields"` +} + +// AudioPartial represents part of a Audio object. It is used to update +// the database entry. +type AudioPartial struct { + Title OptionalString + Code OptionalString + Details OptionalString + Date OptionalDate + // Rating expressed in 1-100 scale + Rating OptionalInt + Organized OptionalBool + StudioID OptionalInt + CreatedAt OptionalTime + UpdatedAt OptionalTime + ResumeTime OptionalFloat64 + PlayDuration OptionalFloat64 + + URLs *UpdateStrings + GalleryIDs *UpdateIDs + TagIDs *UpdateIDs + PerformerIDs *UpdateIDs + GroupIDs *UpdateGroupIDsAudio + PrimaryFileID *FileID +} + +func NewAudioPartial() AudioPartial { + currentTime := time.Now() + return AudioPartial{ + UpdatedAt: NewOptionalTime(currentTime), + } +} + +func (s *Audio) LoadURLs(ctx context.Context, l URLLoader) error { + return s.URLs.load(func() ([]string, error) { + return l.GetURLs(ctx, s.ID) + }) +} + +func (s *Audio) LoadFiles(ctx context.Context, l AudioFileLoader) error { + return s.Files.load(func() ([]*AudioFile, error) { + return l.GetFiles(ctx, s.ID) + }) +} + +func (s *Audio) LoadPrimaryFile(ctx context.Context, l FileGetter) error { + return s.Files.loadPrimary(func() (*AudioFile, error) { + if s.PrimaryFileID == nil { + return nil, nil + } + + f, err := l.Find(ctx, *s.PrimaryFileID) + if err != nil { + return nil, err + } + + var vf *AudioFile + if len(f) > 0 { + var ok bool + vf, ok = f[0].(*AudioFile) + if !ok { + return nil, errors.New("not a video file") + } + } + return vf, nil + }) +} + +func (s *Audio) LoadGalleryIDs(ctx context.Context, l GalleryIDLoader) error { + return s.GalleryIDs.load(func() ([]int, error) { + return l.GetGalleryIDs(ctx, s.ID) + }) +} + +func (s *Audio) LoadPerformerIDs(ctx context.Context, l PerformerIDLoader) error { + return s.PerformerIDs.load(func() ([]int, error) { + return l.GetPerformerIDs(ctx, s.ID) + }) +} + +func (s *Audio) LoadTagIDs(ctx 
context.Context, l TagIDLoader) error { + return s.TagIDs.load(func() ([]int, error) { + return l.GetTagIDs(ctx, s.ID) + }) +} + +func (s *Audio) LoadGroups(ctx context.Context, l AudioGroupLoader) error { + return s.Groups.load(func() ([]GroupsAudios, error) { + return l.GetGroups(ctx, s.ID) + }) +} + +func (s *Audio) LoadRelationships(ctx context.Context, l AudioReader) error { + if err := s.LoadURLs(ctx, l); err != nil { + return err + } + + if err := s.LoadGalleryIDs(ctx, l); err != nil { + return err + } + + if err := s.LoadPerformerIDs(ctx, l); err != nil { + return err + } + + if err := s.LoadTagIDs(ctx, l); err != nil { + return err + } + + if err := s.LoadGroups(ctx, l); err != nil { + return err + } + + if err := s.LoadFiles(ctx, l); err != nil { + return err + } + + return nil +} + +// UpdateInput constructs a AudioUpdateInput using the populated fields in the AudioPartial object. +func (s AudioPartial) UpdateInput(id int) AudioUpdateInput { + var dateStr *string + if s.Date.Set { + d := s.Date.Value + v := d.String() + dateStr = &v + } + + ret := AudioUpdateInput{ + ID: strconv.Itoa(id), + Title: s.Title.Ptr(), + Code: s.Code.Ptr(), + Details: s.Details.Ptr(), + Urls: s.URLs.Strings(), + Date: dateStr, + Rating100: s.Rating.Ptr(), + Organized: s.Organized.Ptr(), + StudioID: s.StudioID.StringPtr(), + PerformerIds: s.PerformerIDs.IDStrings(), + Groups: s.GroupIDs.GroupInputs(), + TagIds: s.TagIDs.IDStrings(), + } + + return ret +} + +// GetTitle returns the title of the audio. If the Title field is empty, +// then the base filename is returned. +func (s Audio) GetTitle() string { + if s.Title != "" { + return s.Title + } + + return filepath.Base(s.Path) +} + +// DisplayName returns a display name for the audio for logging purposes. +// It returns Path if not empty, otherwise it returns the ID. 
+func (s Audio) DisplayName() string { + if s.Path != "" { + return s.Path + } + + return strconv.Itoa(s.ID) +} + +// GetHash returns the hash of the audio, based on the hash algorithm provided. If +// hash algorithm is MD5, then Checksum is returned. Otherwise, OSHash is returned. +func (s Audio) GetHash(hashAlgorithm HashAlgorithm) string { + switch hashAlgorithm { + case HashAlgorithmMd5: + return s.Checksum + case HashAlgorithmOshash: + return s.OSHash + } + + return "" +} + +// AudioFileType represents the file metadata for a audio. +type AudioFileType struct { + Size *string `graphql:"size" json:"size"` + Duration *float64 `graphql:"duration" json:"duration"` + AudioCodec *string `graphql:"audio_codec" json:"audio_codec"` + Samplerate *float64 `graphql:"samplerate" json:"samplerate"` + Bitrate *int `graphql:"bitrate" json:"bitrate"` +} + +// TODO(audio): don't know if we need this, using VideoCaption for now due to `pkg/models/repository_file.go` and `FileReader` using +// type AudioCaption struct { +// LanguageCode string `json:"language_code"` +// Filename string `json:"filename"` +// CaptionType string `json:"caption_type"` +// } + +// func (c AudioCaption) Path(filePath string) string { +// return filepath.Join(filepath.Dir(filePath), c.Filename) +// } diff --git a/pkg/models/model_file.go b/pkg/models/model_file.go index f6b8bdc517..3be19d2854 100644 --- a/pkg/models/model_file.go +++ b/pkg/models/model_file.go @@ -328,3 +328,43 @@ func (f VideoFile) FrameRateFinite() float64 { } return ret } + +// AudioFile is an extension of BaseFile to represent audio files. 
+type AudioFile struct { + *BaseFile + Format string `json:"format"` + Duration float64 `json:"duration"` + AudioCodec string `json:"audio_codec"` + SampleRate float64 `json:"sample_rate"` + BitRate int64 `json:"bitrate"` +} + +func (f AudioFile) GetFormat() string { + return f.Format +} + +func (f AudioFile) Clone() (ret File) { + clone := f + clone.BaseFile = f.BaseFile.Clone().(*BaseFile) + ret = &clone + return +} + +// #1572 - Inf and NaN values cause the JSON marshaller to fail +// Replace these values with 0 rather than erroring + +func (f AudioFile) DurationFinite() float64 { + ret := f.Duration + if math.IsInf(ret, 0) || math.IsNaN(ret) { + return 0 + } + return ret +} + +func (f AudioFile) SampleRateFinite() float64 { + ret := f.SampleRate + if math.IsInf(ret, 0) || math.IsNaN(ret) { + return 0 + } + return ret +} diff --git a/pkg/models/model_joins.go b/pkg/models/model_joins.go index c6cc8c2b22..2012b867e3 100644 --- a/pkg/models/model_joins.go +++ b/pkg/models/model_joins.go @@ -73,3 +73,49 @@ type GroupIDDescription struct { GroupID int `json:"group_id"` Description string `json:"description"` } + +// Audio +type GroupsAudios struct { + GroupID int `json:"group_id"` + AudioIndex *int `json:"audio_index"` +} + +func (s GroupsAudios) AudioGroupInput() AudioGroupInput { + return AudioGroupInput{ + GroupID: strconv.Itoa(s.GroupID), + AudioIndex: s.AudioIndex, + } +} + +func (s GroupsAudios) Equal(o GroupsAudios) bool { + return o.GroupID == s.GroupID && ((o.AudioIndex == nil && s.AudioIndex == nil) || + (o.AudioIndex != nil && s.AudioIndex != nil && *o.AudioIndex == *s.AudioIndex)) +} + +type UpdateGroupIDsAudio struct { + Groups []GroupsAudios `json:"groups"` + Mode RelationshipUpdateMode `json:"mode"` +} + +func (u *UpdateGroupIDsAudio) GroupInputs() []AudioGroupInput { + if u == nil { + return nil + } + + ret := make([]AudioGroupInput, 0, len(u.Groups)) + for _, id := range u.Groups { + ret = append(ret, id.AudioGroupInput()) + } + + return ret +} + 
+func (u *UpdateGroupIDsAudio) AddUnique(v GroupsAudios) { + for _, vv := range u.Groups { + if vv.GroupID == v.GroupID { + return + } + } + + u.Groups = append(u.Groups, v) +} diff --git a/pkg/models/paths/paths.go b/pkg/models/paths/paths.go index da72111cff..27d9b9b47a 100644 --- a/pkg/models/paths/paths.go +++ b/pkg/models/paths/paths.go @@ -11,6 +11,7 @@ type Paths struct { Generated *generatedPaths Scene *scenePaths + Audio *audioPaths SceneMarkers *sceneMarkerPaths Blobs string } diff --git a/pkg/models/paths/paths_audio.go b/pkg/models/paths/paths_audio.go new file mode 100644 index 0000000000..098976426a --- /dev/null +++ b/pkg/models/paths/paths_audio.go @@ -0,0 +1,56 @@ +// TODO(audio): update this file +package paths + +import ( + "path/filepath" + + "github.com/stashapp/stash/pkg/fsutil" +) + +type audioPaths struct { + generatedPaths +} + +func newAudioPaths(p Paths) *audioPaths { + sp := audioPaths{ + generatedPaths: *p.Generated, + } + return &sp +} + +func (sp *audioPaths) GetLegacyScreenshotPath(checksum string) string { + return filepath.Join(sp.Screenshots, checksum+".jpg") +} + +func (sp *audioPaths) GetTranscodePath(checksum string) string { + return filepath.Join(sp.Transcodes, checksum+".mp4") +} + +func (sp *audioPaths) GetStreamPath(audioPath string, checksum string) string { + transcodePath := sp.GetTranscodePath(checksum) + transcodeExists, _ := fsutil.FileExists(transcodePath) + if transcodeExists { + return transcodePath + } + return audioPath +} + +func (sp *audioPaths) GetVideoPreviewPath(checksum string) string { + return filepath.Join(sp.Screenshots, checksum+".mp4") +} + +func (sp *audioPaths) GetWebpPreviewPath(checksum string) string { + return filepath.Join(sp.Screenshots, checksum+".webp") +} + +func (sp *audioPaths) GetSpriteImageFilePath(checksum string) string { + return filepath.Join(sp.Vtt, checksum+"_sprite.jpg") +} + +func (sp *audioPaths) GetSpriteVttFilePath(checksum string) string { + return filepath.Join(sp.Vtt, 
checksum+"_thumbs.vtt") +} + +func (sp *audioPaths) GetInteractiveHeatmapPath(checksum string) string { + return filepath.Join(sp.InteractiveHeatmap, checksum+".png") +} diff --git a/pkg/models/relationships.go b/pkg/models/relationships.go index 5495f858b1..c0a22e025a 100644 --- a/pkg/models/relationships.go +++ b/pkg/models/relationships.go @@ -35,6 +35,10 @@ type FileIDLoader interface { GetManyFileIDs(ctx context.Context, ids []int) ([][]FileID, error) } +type AudioGroupLoader interface { + GetGroups(ctx context.Context, id int) ([]GroupsAudios, error) +} + type SceneGroupLoader interface { GetGroups(ctx context.Context, id int) ([]GroupsScenes, error) } @@ -54,6 +58,9 @@ type StashIDLoader interface { type VideoFileLoader interface { GetFiles(ctx context.Context, relatedID int) ([]*VideoFile, error) } +type AudioFileLoader interface { + GetFiles(ctx context.Context, relatedID int) ([]*AudioFile, error) +} type FileLoader interface { GetFiles(ctx context.Context, relatedID int) ([]File, error) @@ -195,6 +202,77 @@ func (r *RelatedGroups) load(fn func() ([]GroupsScenes, error)) error { return nil } +// Audio +// RelatedGroupsAudio represents a list of related Groups. +type RelatedGroupsAudio struct { + list []GroupsAudios +} + +// NewRelatedGroups returns a loaded RelateGroups object with the provided groups. +// Loaded will return true when called on the returned object if the provided slice is not nil. +func NewRelatedGroupsAudio(list []GroupsAudios) RelatedGroupsAudio { + return RelatedGroupsAudio{ + list: list, + } +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedGroupsAudio) Loaded() bool { + return r.list != nil +} + +func (r RelatedGroupsAudio) mustLoaded() { + if !r.Loaded() { + panic("list has not been loaded") + } +} + +// List returns the related Groups. Panics if the relationship has not been loaded. 
+func (r RelatedGroupsAudio) List() []GroupsAudios { + r.mustLoaded() + + return r.list +} + +// Add adds the provided ids to the list. Panics if the relationship has not been loaded. +func (r *RelatedGroupsAudio) Add(groups ...GroupsAudios) { + r.mustLoaded() + + r.list = append(r.list, groups...) +} + +// ForID returns the GroupsAudios object for the given group ID. Returns nil if not found. +func (r *RelatedGroupsAudio) ForID(id int) *GroupsAudios { + r.mustLoaded() + + for _, v := range r.list { + if v.GroupID == id { + return &v + } + } + + return nil +} + +func (r *RelatedGroupsAudio) load(fn func() ([]GroupsAudios, error)) error { + if r.Loaded() { + return nil + } + + ids, err := fn() + if err != nil { + return err + } + + if ids == nil { + ids = []GroupsAudios{} + } + + r.list = ids + + return nil +} + type RelatedGroupDescriptions struct { list []GroupIDDescription } @@ -430,6 +508,105 @@ func (r *RelatedVideoFiles) loadPrimary(fn func() (*VideoFile, error)) error { return nil } +// Audio + +type RelatedAudioFiles struct { + primaryFile *AudioFile + files []*AudioFile + primaryLoaded bool +} + +func NewRelatedAudioFiles(files []*AudioFile) RelatedAudioFiles { + ret := RelatedAudioFiles{ + files: files, + primaryLoaded: true, + } + + if len(files) > 0 { + ret.primaryFile = files[0] + } + + return ret +} + +func (r *RelatedAudioFiles) SetPrimary(f *AudioFile) { + r.primaryFile = f + r.primaryLoaded = true +} + +func (r *RelatedAudioFiles) Set(f []*AudioFile) { + r.files = f + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true +} + +// Loaded returns true if the relationship has been loaded. +func (r RelatedAudioFiles) Loaded() bool { + return r.files != nil +} + +// Loaded returns true if the primary file relationship has been loaded. +func (r RelatedAudioFiles) PrimaryLoaded() bool { + return r.primaryLoaded +} + +// List returns the related files. Panics if the relationship has not been loaded. 
+func (r RelatedAudioFiles) List() []*AudioFile { + if !r.Loaded() { + panic("relationship has not been loaded") + } + + return r.files +} + +// Primary returns the primary file. Panics if the relationship has not been loaded. +func (r RelatedAudioFiles) Primary() *AudioFile { + if !r.PrimaryLoaded() { + panic("relationship has not been loaded") + } + + return r.primaryFile +} + +func (r *RelatedAudioFiles) load(fn func() ([]*AudioFile, error)) error { + if r.Loaded() { + return nil + } + + var err error + r.files, err = fn() + if err != nil { + return err + } + + if len(r.files) > 0 { + r.primaryFile = r.files[0] + } + + r.primaryLoaded = true + + return nil +} + +func (r *RelatedAudioFiles) loadPrimary(fn func() (*AudioFile, error)) error { + if r.PrimaryLoaded() { + return nil + } + + var err error + r.primaryFile, err = fn() + if err != nil { + return err + } + + r.primaryLoaded = true + + return nil +} + type RelatedFiles struct { primaryFile File files []File diff --git a/pkg/models/repository.go b/pkg/models/repository.go index 9bd1e8cad4..f0c0b5f7af 100644 --- a/pkg/models/repository.go +++ b/pkg/models/repository.go @@ -22,6 +22,7 @@ type Repository struct { Image ImageReaderWriter Group GroupReaderWriter Performer PerformerReaderWriter + Audio AudioReaderWriter Scene SceneReaderWriter SceneMarker SceneMarkerReaderWriter Studio StudioReaderWriter diff --git a/pkg/models/repository_audio.go b/pkg/models/repository_audio.go new file mode 100644 index 0000000000..d96cf027fa --- /dev/null +++ b/pkg/models/repository_audio.go @@ -0,0 +1,122 @@ +// TODO(audio): update this file + +package models + +import ( + "context" + // "time" +) + +// AudioGetter provides methods to get audios by ID. 
+type AudioGetter interface { + // TODO - rename this to Find and remove existing method + FindMany(ctx context.Context, ids []int) ([]*Audio, error) + Find(ctx context.Context, id int) (*Audio, error) + // FindByIDs works the same way as FindMany, but it ignores any audios not found + // Audios are not guaranteed to be in the same order as the input + FindByIDs(ctx context.Context, ids []int) ([]*Audio, error) +} + +// AudioFinder provides methods to find audios. +type AudioFinder interface { + AudioGetter + FindByFingerprints(ctx context.Context, fp []Fingerprint) ([]*Audio, error) + FindByChecksum(ctx context.Context, checksum string) ([]*Audio, error) + FindByOSHash(ctx context.Context, oshash string) ([]*Audio, error) + FindByPath(ctx context.Context, path string) ([]*Audio, error) + FindByFileID(ctx context.Context, fileID FileID) ([]*Audio, error) + FindByPrimaryFileID(ctx context.Context, fileID FileID) ([]*Audio, error) + FindByPerformerID(ctx context.Context, performerID int) ([]*Audio, error) + FindByGalleryID(ctx context.Context, performerID int) ([]*Audio, error) + FindByGroupID(ctx context.Context, groupID int) ([]*Audio, error) + FindDuplicates(ctx context.Context, distance int, durationDiff float64) ([][]*Audio, error) +} + +// AudioQueryer provides methods to query audios. +type AudioQueryer interface { + Query(ctx context.Context, options AudioQueryOptions) (*AudioQueryResult, error) + QueryCount(ctx context.Context, audioFilter *AudioFilterType, findFilter *FindFilterType) (int, error) +} + +// AudioCounter provides methods to count audios. 
+type AudioCounter interface { + Count(ctx context.Context) (int, error) + CountByPerformerID(ctx context.Context, performerID int) (int, error) + CountByFileID(ctx context.Context, fileID FileID) (int, error) + CountMissingChecksum(ctx context.Context) (int, error) + CountMissingOSHash(ctx context.Context) (int, error) + OCountByPerformerID(ctx context.Context, performerID int) (int, error) + OCountByGroupID(ctx context.Context, groupID int) (int, error) + OCountByStudioID(ctx context.Context, studioID int) (int, error) +} + +// AudioCreator provides methods to create audios. +type AudioCreator interface { + Create(ctx context.Context, newAudio *Audio, fileIDs []FileID) error +} + +// AudioUpdater provides methods to update audios. +type AudioUpdater interface { + Update(ctx context.Context, updatedAudio *Audio) error + UpdatePartial(ctx context.Context, id int, updatedAudio AudioPartial) (*Audio, error) + UpdateCover(ctx context.Context, audioID int, cover []byte) error +} + +// AudioDestroyer provides methods to destroy audios. +type AudioDestroyer interface { + Destroy(ctx context.Context, id int) error +} + +type AudioCreatorUpdater interface { + AudioCreator + AudioUpdater +} + +// AudioReader provides all methods to read audios. +type AudioReader interface { + AudioFinder + AudioQueryer + AudioCounter + + URLLoader + ViewDateReader + ODateReader + FileIDLoader + GalleryIDLoader + PerformerIDLoader + TagIDLoader + AudioGroupLoader + AudioFileLoader + CustomFieldsReader + + All(ctx context.Context) ([]*Audio, error) + Wall(ctx context.Context, q *string) ([]*Audio, error) + Size(ctx context.Context) (float64, error) + Duration(ctx context.Context) (float64, error) + PlayDuration(ctx context.Context) (float64, error) + GetCover(ctx context.Context, audioID int) ([]byte, error) + HasCover(ctx context.Context, audioID int) (bool, error) +} + +// AudioWriter provides all methods to modify audios. 
+type AudioWriter interface { + AudioCreator + AudioUpdater + AudioDestroyer + + AddFileID(ctx context.Context, id int, fileID FileID) error + AddGalleryIDs(ctx context.Context, audioID int, galleryIDs []int) error + AssignFiles(ctx context.Context, audioID int, fileID []FileID) error + + OHistoryWriter + ViewHistoryWriter + SaveActivity(ctx context.Context, audioID int, resumeTime *float64, playDuration *float64) (bool, error) + ResetActivity(ctx context.Context, audioID int, resetResume bool, resetDuration bool) (bool, error) + CustomFieldsWriter +} + +// AudioReaderWriter provides all audio methods. +type AudioReaderWriter interface { + AudioReader + AudioWriter +} diff --git a/pkg/plugin/hook/hooks.go b/pkg/plugin/hook/hooks.go index a8235b1834..14b9e40961 100644 --- a/pkg/plugin/hook/hooks.go +++ b/pkg/plugin/hook/hooks.go @@ -14,6 +14,10 @@ const ( SceneUpdatePost TriggerEnum = "Scene.Update.Post" SceneDestroyPost TriggerEnum = "Scene.Destroy.Post" + AudioCreatePost TriggerEnum = "Audio.Create.Post" + AudioUpdatePost TriggerEnum = "Audio.Update.Post" + AudioDestroyPost TriggerEnum = "Audio.Destroy.Post" + ImageCreatePost TriggerEnum = "Image.Create.Post" ImageUpdatePost TriggerEnum = "Image.Update.Post" ImageDestroyPost TriggerEnum = "Image.Destroy.Post" diff --git a/pkg/plugin/hooks.go b/pkg/plugin/hooks.go index 1a40c52f18..59a9d1b850 100644 --- a/pkg/plugin/hooks.go +++ b/pkg/plugin/hooks.go @@ -31,6 +31,21 @@ type ScenesDestroyInput struct { Path string `json:"path"` } +// Audio +type AudioDestroyInput struct { + models.AudioDestroyInput + Checksum string `json:"checksum"` + OSHash string `json:"oshash"` + Path string `json:"path"` +} + +type AudiosDestroyInput struct { + models.AudiosDestroyInput + Checksum string `json:"checksum"` + OSHash string `json:"oshash"` + Path string `json:"path"` +} + type GalleryDestroyInput struct { models.GalleryDestroyInput Checksum string `json:"checksum"` diff --git a/pkg/scraper/stash.go b/pkg/scraper/stash.go index 
23c4b90638..c7fdcb8887 100644 --- a/pkg/scraper/stash.go +++ b/pkg/scraper/stash.go @@ -1,3 +1,4 @@ +// TODO(audio): update this file package scraper import ( diff --git a/pkg/sqlite/audio.go b/pkg/sqlite/audio.go new file mode 100644 index 0000000000..fa82c4059c --- /dev/null +++ b/pkg/sqlite/audio.go @@ -0,0 +1,1454 @@ +// TODO(audio): update this file + +package sqlite + +import ( + "context" + "database/sql" + "errors" + "fmt" + "path/filepath" + "slices" + "sort" + "strconv" + "strings" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/exp" + "github.com/jmoiron/sqlx" + "gopkg.in/guregu/null.v4" + "gopkg.in/guregu/null.v4/zero" + + "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil" + "github.com/stashapp/stash/pkg/utils" +) + +const ( + audioTable = "audios" + audiosFilesTable = "audios_files" + audioIDColumn = "audio_id" + audioDateColumn = "date" + performersAudiosTable = "performers_audios" + audiosTagsTable = "audios_tags" + audiosGalleriesTable = "audios_galleries" + groupsAudiosTable = "groups_audios" + audiosURLsTable = "audio_urls" + audioURLColumn = "url" + audiosViewDatesTable = "audios_view_dates" + audioViewDateColumn = "view_date" + audiosODatesTable = "audios_o_dates" + audioODateColumn = "o_date" + + audioCoverBlobColumn = "cover_blob" +) + +var findExactDuplicateQuery = ` +SELECT GROUP_CONCAT(DISTINCT audio_id) as ids +FROM ( + SELECT audios.id as audio_id + , audio_files.duration as file_duration + , files.size as file_size + , files_fingerprints.fingerprint as phash + , abs(max(audio_files.duration) OVER (PARTITION by files_fingerprints.fingerprint) - audio_files.duration) as durationDiff + FROM audios + INNER JOIN audios_files ON (audios.id = audios_files.audio_id) + INNER JOIN files ON (audios_files.file_id = files.id) + INNER JOIN files_fingerprints ON (audios_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') + INNER JOIN audio_files ON (files.id == 
audio_files.file_id) +) +WHERE durationDiff <= ?1 + OR ?1 < 0 -- Always TRUE if the parameter is negative. + -- That will disable the durationDiff checking. +GROUP BY phash +HAVING COUNT(phash) > 1 + AND COUNT(DISTINCT audio_id) > 1 +ORDER BY SUM(file_size) DESC; +` + +var findAllPhashesQuery = ` +SELECT audios.id as id + , files_fingerprints.fingerprint as phash + , audio_files.duration as duration +FROM audios +INNER JOIN audios_files ON (audios.id = audios_files.audio_id) +INNER JOIN files ON (audios_files.file_id = files.id) +INNER JOIN files_fingerprints ON (audios_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') +INNER JOIN audio_files ON (files.id == audio_files.file_id) +ORDER BY files.size DESC; +` + +type audioRow struct { + ID int `db:"id" goqu:"skipinsert"` + Title zero.String `db:"title"` + Code zero.String `db:"code"` + Details zero.String `db:"details"` + Director zero.String `db:"director"` + Date NullDate `db:"date"` + DatePrecision null.Int `db:"date_precision"` + // expressed as 1-100 + Rating null.Int `db:"rating"` + Organized bool `db:"organized"` + StudioID null.Int `db:"studio_id,omitempty"` + CreatedAt Timestamp `db:"created_at"` + UpdatedAt Timestamp `db:"updated_at"` + ResumeTime float64 `db:"resume_time"` + PlayDuration float64 `db:"play_duration"` + + // not used in resolutions or updates + CoverBlob zero.String `db:"cover_blob"` +} + +func (r *audioRow) fromAudio(o models.Audio) { + r.ID = o.ID + r.Title = zero.StringFrom(o.Title) + r.Code = zero.StringFrom(o.Code) + r.Details = zero.StringFrom(o.Details) + r.Director = zero.StringFrom(o.Director) + r.Date = NullDateFromDatePtr(o.Date) + r.DatePrecision = datePrecisionFromDatePtr(o.Date) + r.Rating = intFromPtr(o.Rating) + r.Organized = o.Organized + r.StudioID = intFromPtr(o.StudioID) + r.CreatedAt = Timestamp{Timestamp: o.CreatedAt} + r.UpdatedAt = Timestamp{Timestamp: o.UpdatedAt} + r.ResumeTime = o.ResumeTime + r.PlayDuration = o.PlayDuration +} + 
+type audioQueryRow struct { + audioRow + PrimaryFileID null.Int `db:"primary_file_id"` + PrimaryFileFolderPath zero.String `db:"primary_file_folder_path"` + PrimaryFileBasename zero.String `db:"primary_file_basename"` + PrimaryFileOshash zero.String `db:"primary_file_oshash"` + PrimaryFileChecksum zero.String `db:"primary_file_checksum"` +} + +func (r *audioQueryRow) resolve() *models.Audio { + ret := &models.Audio{ + ID: r.ID, + Title: r.Title.String, + Code: r.Code.String, + Details: r.Details.String, + Director: r.Director.String, + Date: r.Date.DatePtr(r.DatePrecision), + Rating: nullIntPtr(r.Rating), + Organized: r.Organized, + StudioID: nullIntPtr(r.StudioID), + + PrimaryFileID: nullIntFileIDPtr(r.PrimaryFileID), + OSHash: r.PrimaryFileOshash.String, + Checksum: r.PrimaryFileChecksum.String, + + CreatedAt: r.CreatedAt.Timestamp, + UpdatedAt: r.UpdatedAt.Timestamp, + + ResumeTime: r.ResumeTime, + PlayDuration: r.PlayDuration, + } + + if r.PrimaryFileFolderPath.Valid && r.PrimaryFileBasename.Valid { + ret.Path = filepath.Join(r.PrimaryFileFolderPath.String, r.PrimaryFileBasename.String) + } + + return ret +} + +type audioRowRecord struct { + updateRecord +} + +func (r *audioRowRecord) fromPartial(o models.AudioPartial) { + r.setNullString("title", o.Title) + r.setNullString("code", o.Code) + r.setNullString("details", o.Details) + r.setNullString("director", o.Director) + r.setNullDate("date", "date_precision", o.Date) + r.setNullInt("rating", o.Rating) + r.setBool("organized", o.Organized) + r.setNullInt("studio_id", o.StudioID) + r.setTimestamp("created_at", o.CreatedAt) + r.setTimestamp("updated_at", o.UpdatedAt) + r.setFloat64("resume_time", o.ResumeTime) + r.setFloat64("play_duration", o.PlayDuration) +} + +type audioRepositoryType struct { + repository + galleries joinRepository + tags joinRepository + performers joinRepository + groups repository + + files filesRepository +} + +var ( + audioRepository = audioRepositoryType{ + repository: repository{ + 
tableName: audioTable, + idColumn: idColumn, + }, + galleries: joinRepository{ + repository: repository{ + tableName: audiosGalleriesTable, + idColumn: audioIDColumn, + }, + fkColumn: galleryIDColumn, + }, + tags: joinRepository{ + repository: repository{ + tableName: audiosTagsTable, + idColumn: audioIDColumn, + }, + fkColumn: tagIDColumn, + foreignTable: tagTable, + orderBy: tagTableSortSQL, + }, + performers: joinRepository{ + repository: repository{ + tableName: performersAudiosTable, + idColumn: audioIDColumn, + }, + fkColumn: performerIDColumn, + }, + groups: repository{ + tableName: groupsAudiosTable, + idColumn: audioIDColumn, + }, + files: filesRepository{ + repository: repository{ + tableName: audiosFilesTable, + idColumn: audioIDColumn, + }, + }, + } +) + +type AudioStore struct { + blobJoinQueryBuilder + customFieldsStore + + tableMgr *table + oDateManager + viewDateManager + + repo *storeRepository +} + +func NewAudioStore(r *storeRepository, blobStore *BlobStore) *AudioStore { + return &AudioStore{ + blobJoinQueryBuilder: blobJoinQueryBuilder{ + blobStore: blobStore, + joinTable: audioTable, + }, + customFieldsStore: customFieldsStore{ + table: audiosCustomFieldsTable, + fk: audiosCustomFieldsTable.Col(audioIDColumn), + }, + + tableMgr: audioTableMgr, + viewDateManager: viewDateManager{audiosViewTableMgr}, + oDateManager: oDateManager{audiosOTableMgr}, + repo: r, + } +} + +func (qb *AudioStore) table() exp.IdentifierExpression { + return qb.tableMgr.table +} + +func (qb *AudioStore) selectDataset() *goqu.SelectDataset { + table := qb.table() + files := fileTableMgr.table + folders := folderTableMgr.table + checksum := fingerprintTableMgr.table.As("fingerprint_md5") + oshash := fingerprintTableMgr.table.As("fingerprint_oshash") + + return dialect.From(table).LeftJoin( + audiosFilesJoinTable, + goqu.On( + audiosFilesJoinTable.Col(audioIDColumn).Eq(table.Col(idColumn)), + audiosFilesJoinTable.Col("primary").Eq(1), + ), + ).LeftJoin( + files, + 
goqu.On(files.Col(idColumn).Eq(audiosFilesJoinTable.Col(fileIDColumn))), + ).LeftJoin( + folders, + goqu.On(folders.Col(idColumn).Eq(files.Col("parent_folder_id"))), + ).LeftJoin( + checksum, + goqu.On( + checksum.Col(fileIDColumn).Eq(audiosFilesJoinTable.Col(fileIDColumn)), + checksum.Col("type").Eq(models.FingerprintTypeMD5), + ), + ).LeftJoin( + oshash, + goqu.On( + oshash.Col(fileIDColumn).Eq(audiosFilesJoinTable.Col(fileIDColumn)), + oshash.Col("type").Eq(models.FingerprintTypeOshash), + ), + ).Select( + qb.table().All(), + audiosFilesJoinTable.Col(fileIDColumn).As("primary_file_id"), + folders.Col("path").As("primary_file_folder_path"), + files.Col("basename").As("primary_file_basename"), + checksum.Col("fingerprint").As("primary_file_checksum"), + oshash.Col("fingerprint").As("primary_file_oshash"), + ) +} + +func (qb *AudioStore) Create(ctx context.Context, newObject *models.Audio, fileIDs []models.FileID) error { + var r audioRow + r.fromAudio(*newObject) + + id, err := qb.tableMgr.insertID(ctx, r) + if err != nil { + return err + } + + if len(fileIDs) > 0 { + const firstPrimary = true + if err := audiosFilesTableMgr.insertJoins(ctx, id, firstPrimary, fileIDs); err != nil { + return err + } + } + + if newObject.URLs.Loaded() { + const startPos = 0 + if err := audiosURLsTableMgr.insertJoins(ctx, id, startPos, newObject.URLs.List()); err != nil { + return err + } + } + + if newObject.PerformerIDs.Loaded() { + if err := audiosPerformersTableMgr.insertJoins(ctx, id, newObject.PerformerIDs.List()); err != nil { + return err + } + } + if newObject.TagIDs.Loaded() { + if err := audiosTagsTableMgr.insertJoins(ctx, id, newObject.TagIDs.List()); err != nil { + return err + } + } + + if newObject.GalleryIDs.Loaded() { + if err := audiosGalleriesTableMgr.insertJoins(ctx, id, newObject.GalleryIDs.List()); err != nil { + return err + } + } + + if newObject.Groups.Loaded() { + if err := audiosGroupsTableMgr.insertJoins(ctx, id, newObject.Groups.List()); err != nil { + 
return err + } + } + + updated, err := qb.find(ctx, id) + if err != nil { + return fmt.Errorf("finding after create: %w", err) + } + + *newObject = *updated + + return nil +} + +func (qb *AudioStore) UpdatePartial(ctx context.Context, id int, partial models.AudioPartial) (*models.Audio, error) { + r := audioRowRecord{ + updateRecord{ + Record: make(exp.Record), + }, + } + + r.fromPartial(partial) + + if len(r.Record) > 0 { + if err := qb.tableMgr.updateByID(ctx, id, r.Record); err != nil { + return nil, err + } + } + + if partial.URLs != nil { + if err := audiosURLsTableMgr.modifyJoins(ctx, id, partial.URLs.Values, partial.URLs.Mode); err != nil { + return nil, err + } + } + if partial.PerformerIDs != nil { + if err := audiosPerformersTableMgr.modifyJoins(ctx, id, partial.PerformerIDs.IDs, partial.PerformerIDs.Mode); err != nil { + return nil, err + } + } + if partial.TagIDs != nil { + if err := audiosTagsTableMgr.modifyJoins(ctx, id, partial.TagIDs.IDs, partial.TagIDs.Mode); err != nil { + return nil, err + } + } + if partial.GalleryIDs != nil { + if err := audiosGalleriesTableMgr.modifyJoins(ctx, id, partial.GalleryIDs.IDs, partial.GalleryIDs.Mode); err != nil { + return nil, err + } + } + if partial.GroupIDs != nil { + if err := audiosGroupsTableMgr.modifyJoins(ctx, id, partial.GroupIDs.Groups, partial.GroupIDs.Mode); err != nil { + return nil, err + } + } + if partial.PrimaryFileID != nil { + if err := audiosFilesTableMgr.setPrimary(ctx, id, *partial.PrimaryFileID); err != nil { + return nil, err + } + } + + return qb.find(ctx, id) +} + +func (qb *AudioStore) Update(ctx context.Context, updatedObject *models.Audio) error { + var r audioRow + r.fromAudio(*updatedObject) + + if err := qb.tableMgr.updateByID(ctx, updatedObject.ID, r); err != nil { + return err + } + + if updatedObject.URLs.Loaded() { + if err := audiosURLsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.URLs.List()); err != nil { + return err + } + } + + if 
updatedObject.PerformerIDs.Loaded() { + if err := audiosPerformersTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.PerformerIDs.List()); err != nil { + return err + } + } + + if updatedObject.TagIDs.Loaded() { + if err := audiosTagsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.TagIDs.List()); err != nil { + return err + } + } + + if updatedObject.GalleryIDs.Loaded() { + if err := audiosGalleriesTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.GalleryIDs.List()); err != nil { + return err + } + } + + if updatedObject.Groups.Loaded() { + if err := audiosGroupsTableMgr.replaceJoins(ctx, updatedObject.ID, updatedObject.Groups.List()); err != nil { + return err + } + } + + if updatedObject.Files.Loaded() { + fileIDs := make([]models.FileID, len(updatedObject.Files.List())) + for i, f := range updatedObject.Files.List() { + fileIDs[i] = f.ID + } + + if err := audiosFilesTableMgr.replaceJoins(ctx, updatedObject.ID, fileIDs); err != nil { + return err + } + } + + return nil +} + +func (qb *AudioStore) Destroy(ctx context.Context, id int) error { + // must handle image checksums manually + if err := qb.destroyCover(ctx, id); err != nil { + return err + } + + // audio markers should be handled prior to calling destroy + // galleries should be handled prior to calling destroy + + return qb.tableMgr.destroyExisting(ctx, []int{id}) +} + +// returns nil, nil if not found +func (qb *AudioStore) Find(ctx context.Context, id int) (*models.Audio, error) { + ret, err := qb.find(ctx, id) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return ret, err +} + +// FindByIDs finds multiple audios by their IDs. +// No check is made to see if the audios exist, and the order of the returned audios +// is not guaranteed to be the same as the order of the input IDs. 
+func (qb *AudioStore) FindByIDs(ctx context.Context, ids []int) ([]*models.Audio, error) { + audios := make([]*models.Audio, 0, len(ids)) + + table := qb.table() + if err := batchExec(ids, defaultBatchSize, func(batch []int) error { + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) + unsorted, err := qb.getMany(ctx, q) + if err != nil { + return err + } + + audios = append(audios, unsorted...) + + return nil + }); err != nil { + return nil, err + } + + return audios, nil +} + +func (qb *AudioStore) FindMany(ctx context.Context, ids []int) ([]*models.Audio, error) { + audios := make([]*models.Audio, len(ids)) + + unsorted, err := qb.FindByIDs(ctx, ids) + if err != nil { + return nil, err + } + + for _, s := range unsorted { + i := slices.Index(ids, s.ID) + audios[i] = s + } + + for i := range audios { + if audios[i] == nil { + return nil, fmt.Errorf("audio with id %d not found", ids[i]) + } + } + + return audios, nil +} + +// returns nil, sql.ErrNoRows if not found +func (qb *AudioStore) find(ctx context.Context, id int) (*models.Audio, error) { + q := qb.selectDataset().Where(qb.tableMgr.byID(id)) + + ret, err := qb.get(ctx, q) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *AudioStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Audio, error) { + table := qb.table() + + q := qb.selectDataset().Where( + table.Col(idColumn).Eq( + sq, + ), + ) + + return qb.getMany(ctx, q) +} + +// returns nil, sql.ErrNoRows if not found +func (qb *AudioStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Audio, error) { + ret, err := qb.getMany(ctx, q) + if err != nil { + return nil, err + } + + if len(ret) == 0 { + return nil, sql.ErrNoRows + } + + return ret[0], nil +} + +func (qb *AudioStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Audio, error) { + const single = false + var ret []*models.Audio + var lastID int + if err := queryFunc(ctx, q, single, func(r 
*sqlx.Rows) error { + var f audioQueryRow + if err := r.StructScan(&f); err != nil { + return err + } + + s := f.resolve() + if s.ID == lastID { + return fmt.Errorf("internal error: multiple rows returned for single audio id %d", s.ID) + } + lastID = s.ID + + ret = append(ret, s) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *AudioStore) GetFiles(ctx context.Context, id int) ([]*models.AudioFile, error) { + fileIDs, err := audioRepository.files.get(ctx, id) + if err != nil { + return nil, err + } + + // use fileStore to load files + files, err := qb.repo.File.Find(ctx, fileIDs...) + if err != nil { + return nil, err + } + + ret := make([]*models.AudioFile, len(files)) + for i, f := range files { + var ok bool + ret[i], ok = f.(*models.AudioFile) + if !ok { + return nil, fmt.Errorf("expected file to be *file.AudioFile not %T", f) + } + } + + return ret, nil +} + +func (qb *AudioStore) GetManyFileIDs(ctx context.Context, ids []int) ([][]models.FileID, error) { + const primaryOnly = false + return audioRepository.files.getMany(ctx, ids, primaryOnly) +} + +func (qb *AudioStore) FindByFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) { + sq := dialect.From(audiosFilesJoinTable).Select(audiosFilesJoinTable.Col(audioIDColumn)).Where( + audiosFilesJoinTable.Col(fileIDColumn).Eq(fileID), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting audios by file id %d: %w", fileID, err) + } + + return ret, nil +} + +func (qb *AudioStore) FindByPrimaryFileID(ctx context.Context, fileID models.FileID) ([]*models.Audio, error) { + sq := dialect.From(audiosFilesJoinTable).Select(audiosFilesJoinTable.Col(audioIDColumn)).Where( + audiosFilesJoinTable.Col(fileIDColumn).Eq(fileID), + audiosFilesJoinTable.Col("primary").Eq(1), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting audios by primary file id %d: %w", fileID, err) + } + + 
return ret, nil +} + +func (qb *AudioStore) CountByFileID(ctx context.Context, fileID models.FileID) (int, error) { + joinTable := audiosFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) +} + +func (qb *AudioStore) FindByFingerprints(ctx context.Context, fp []models.Fingerprint) ([]*models.Audio, error) { + fingerprintTable := fingerprintTableMgr.table + + var ex []exp.Expression + + for _, v := range fp { + ex = append(ex, goqu.And( + fingerprintTable.Col("type").Eq(v.Type), + fingerprintTable.Col("fingerprint").Eq(v.Fingerprint), + )) + } + + sq := dialect.From(audiosFilesJoinTable). + InnerJoin( + fingerprintTable, + goqu.On(fingerprintTable.Col(fileIDColumn).Eq(audiosFilesJoinTable.Col(fileIDColumn))), + ). + Select(audiosFilesJoinTable.Col(audioIDColumn)).Where(goqu.Or(ex...)) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil { + return nil, fmt.Errorf("getting audios by fingerprints: %w", err) + } + + return ret, nil +} + +func (qb *AudioStore) FindByChecksum(ctx context.Context, checksum string) ([]*models.Audio, error) { + return qb.FindByFingerprints(ctx, []models.Fingerprint{ + { + Type: models.FingerprintTypeMD5, + Fingerprint: checksum, + }, + }) +} + +func (qb *AudioStore) FindByOSHash(ctx context.Context, oshash string) ([]*models.Audio, error) { + return qb.FindByFingerprints(ctx, []models.Fingerprint{ + { + Type: models.FingerprintTypeOshash, + Fingerprint: oshash, + }, + }) +} + +func (qb *AudioStore) FindByPath(ctx context.Context, p string) ([]*models.Audio, error) { + filesTable := fileTableMgr.table + foldersTable := folderTableMgr.table + basename := filepath.Base(p) + dir := filepath.Dir(p) + + // replace wildcards + basename = strings.ReplaceAll(basename, "*", "%") + dir = strings.ReplaceAll(dir, "*", "%") + + sq := dialect.From(audiosFilesJoinTable).InnerJoin( + filesTable, + 
goqu.On(filesTable.Col(idColumn).Eq(audiosFilesJoinTable.Col(fileIDColumn))), + ).InnerJoin( + foldersTable, + goqu.On(foldersTable.Col(idColumn).Eq(filesTable.Col("parent_folder_id"))), + ).Select(audiosFilesJoinTable.Col(audioIDColumn)).Where( + foldersTable.Col("path").Like(dir), + filesTable.Col("basename").Like(basename), + ) + + ret, err := qb.findBySubquery(ctx, sq) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("getting audio by path %s: %w", p, err) + } + + return ret, nil +} + +func (qb *AudioStore) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Audio, error) { + sq := dialect.From(audiosPerformersJoinTable).Select(audiosPerformersJoinTable.Col(audioIDColumn)).Where( + audiosPerformersJoinTable.Col(performerIDColumn).Eq(performerID), + ) + ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting audios for performer %d: %w", performerID, err) + } + + return ret, nil +} + +func (qb *AudioStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Audio, error) { + sq := dialect.From(galleriesAudiosJoinTable).Select(galleriesAudiosJoinTable.Col(audioIDColumn)).Where( + galleriesAudiosJoinTable.Col(galleryIDColumn).Eq(galleryID), + ) + ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting audios for gallery %d: %w", galleryID, err) + } + + return ret, nil +} + +func (qb *AudioStore) CountByPerformerID(ctx context.Context, performerID int) (int, error) { + joinTable := audiosPerformersJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(performerIDColumn).Eq(performerID)) + return count(ctx, q) +} + +func (qb *AudioStore) OCountByPerformerID(ctx context.Context, performerID int) (int, error) { + table := qb.table() + joinTable := audiosPerformersJoinTable + oHistoryTable := goqu.T(audiosODatesTable) + + q := dialect.Select(goqu.COUNT("*")).From(table).InnerJoin( + oHistoryTable, + 
goqu.On(table.Col(idColumn).Eq(oHistoryTable.Col(audioIDColumn))), + ).InnerJoin( + joinTable, + goqu.On( + table.Col(idColumn).Eq(joinTable.Col(audioIDColumn)), + ), + ).Where(joinTable.Col(performerIDColumn).Eq(performerID)) + + var ret int + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *AudioStore) OCountByGroupID(ctx context.Context, groupID int) (int, error) { + table := qb.table() + joinTable := audiosGroupsJoinTable + oHistoryTable := goqu.T(audiosODatesTable) + + q := dialect.Select(goqu.COUNT("*")).From(table).InnerJoin( + oHistoryTable, + goqu.On(table.Col(idColumn).Eq(oHistoryTable.Col(audioIDColumn))), + ).InnerJoin( + joinTable, + goqu.On( + table.Col(idColumn).Eq(joinTable.Col(audioIDColumn)), + ), + ).Where(joinTable.Col(groupIDColumn).Eq(groupID)) + + var ret int + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *AudioStore) OCountByStudioID(ctx context.Context, studioID int) (int, error) { + table := qb.table() + oHistoryTable := goqu.T(audiosODatesTable) + + q := dialect.Select(goqu.COUNT("*")).From(table).InnerJoin( + oHistoryTable, + goqu.On(table.Col(idColumn).Eq(oHistoryTable.Col(audioIDColumn))), + ).Where(table.Col(studioIDColumn).Eq(studioID)) + + var ret int + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *AudioStore) FindByGroupID(ctx context.Context, groupID int) ([]*models.Audio, error) { + sq := dialect.From(audiosGroupsJoinTable).Select(audiosGroupsJoinTable.Col(audioIDColumn)).Where( + audiosGroupsJoinTable.Col(groupIDColumn).Eq(groupID), + ) + ret, err := qb.findBySubquery(ctx, sq) + + if err != nil { + return nil, fmt.Errorf("getting audios for group %d: %w", groupID, err) + } + + return ret, nil +} + +func (qb *AudioStore) Count(ctx context.Context) (int, error) { + q := dialect.Select(goqu.COUNT("*")).From(qb.table()) + return count(ctx, q) +} + +func (qb 
*AudioStore) Size(ctx context.Context) (float64, error) { + table := qb.table() + fileTable := fileTableMgr.table + q := dialect.Select( + goqu.COALESCE(goqu.SUM(fileTableMgr.table.Col("size")), 0), + ).From(table).InnerJoin( + audiosFilesJoinTable, + goqu.On(table.Col(idColumn).Eq(audiosFilesJoinTable.Col(audioIDColumn))), + ).InnerJoin( + fileTable, + goqu.On(audiosFilesJoinTable.Col(fileIDColumn).Eq(fileTable.Col(idColumn))), + ) + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *AudioStore) Duration(ctx context.Context) (float64, error) { + table := qb.table() + AudioFileTable := AudioFileTableMgr.table + + q := dialect.Select( + goqu.COALESCE(goqu.SUM(AudioFileTable.Col("duration")), 0), + ).From(table).InnerJoin( + audiosFilesJoinTable, + goqu.On(audiosFilesJoinTable.Col("audio_id").Eq(table.Col(idColumn))), + ).InnerJoin( + AudioFileTable, + goqu.On(AudioFileTable.Col("file_id").Eq(audiosFilesJoinTable.Col("file_id"))), + ) + + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +func (qb *AudioStore) PlayDuration(ctx context.Context) (float64, error) { + table := qb.table() + + q := dialect.Select(goqu.COALESCE(goqu.SUM("play_duration"), 0)).From(table) + + var ret float64 + if err := querySimple(ctx, q, &ret); err != nil { + return 0, err + } + + return ret, nil +} + +// TODO - currently only used by unit test +func (qb *AudioStore) CountByStudioID(ctx context.Context, studioID int) (int, error) { + table := qb.table() + + q := dialect.Select(goqu.COUNT("*")).From(table).Where(table.Col(studioIDColumn).Eq(studioID)) + return count(ctx, q) +} + +func (qb *AudioStore) countMissingFingerprints(ctx context.Context, fpType string) (int, error) { + fpTable := fingerprintTableMgr.table.As("fingerprints_temp") + + q := dialect.From(audiosFilesJoinTable).LeftJoin( + fpTable, + goqu.On( + 
audiosFilesJoinTable.Col(fileIDColumn).Eq(fpTable.Col(fileIDColumn)), + fpTable.Col("type").Eq(fpType), + ), + ).Select(goqu.COUNT(goqu.DISTINCT(audiosFilesJoinTable.Col(audioIDColumn)))).Where(fpTable.Col("fingerprint").IsNull()) + + return count(ctx, q) +} + +// CountMissingChecksum returns the number of audios missing a checksum value. +func (qb *AudioStore) CountMissingChecksum(ctx context.Context) (int, error) { + return qb.countMissingFingerprints(ctx, "md5") +} + +// CountMissingOSHash returns the number of audios missing an oshash value. +func (qb *AudioStore) CountMissingOSHash(ctx context.Context) (int, error) { + return qb.countMissingFingerprints(ctx, "oshash") +} + +func (qb *AudioStore) Wall(ctx context.Context, q *string) ([]*models.Audio, error) { + s := "" + if q != nil { + s = *q + } + + table := qb.table() + qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + return qb.getMany(ctx, qq) +} + +func (qb *AudioStore) All(ctx context.Context) ([]*models.Audio, error) { + table := qb.table() + fileTable := fileTableMgr.table + folderTable := folderTableMgr.table + + return qb.getMany(ctx, qb.selectDataset().Order( + folderTable.Col("path").Asc(), + fileTable.Col("basename").Asc(), + table.Col("date").Asc(), + )) +} + +func (qb *AudioStore) makeQuery(ctx context.Context, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType) (*queryBuilder, error) { + if audioFilter == nil { + audioFilter = &models.AudioFilterType{} + } + if findFilter == nil { + findFilter = &models.FindFilterType{} + } + + query := audioRepository.newQuery() + distinctIDs(&query, audioTable) + + if q := findFilter.Q; q != nil && *q != "" { + query.addJoins( + join{ + table: audiosFilesTable, + onClause: "audios_files.audio_id = audios.id", + }, + join{ + table: fileTable, + onClause: "audios_files.file_id = files.id", + }, + join{ + table: folderTable, + onClause: "files.parent_folder_id = 
folders.id", + }, + join{ + table: fingerprintTable, + onClause: "files_fingerprints.file_id = audios_files.file_id", + }, + join{ + table: audioMarkerTable, + onClause: "audio_markers.audio_id = audios.id", + }, + ) + + filepathColumn := "folders.path || '" + string(filepath.Separator) + "' || files.basename" + searchColumns := []string{"audios.title", "audios.details", filepathColumn, "files_fingerprints.fingerprint", "audio_markers.title"} + query.parseQueryString(searchColumns, *q) + } + + filter := filterBuilderFromHandler(ctx, &audioFilterHandler{ + audioFilter: audioFilter, + }) + + if err := query.addFilter(filter); err != nil { + return nil, err + } + + if err := qb.setAudioSort(&query, findFilter); err != nil { + return nil, err + } + query.sortAndPagination += getPagination(findFilter) + + return &query, nil +} + +func (qb *AudioStore) Query(ctx context.Context, options models.AudioQueryOptions) (*models.AudioQueryResult, error) { + query, err := qb.makeQuery(ctx, options.AudioFilter, options.FindFilter) + if err != nil { + return nil, err + } + + result, err := qb.queryGroupedFields(ctx, options, *query) + if err != nil { + return nil, fmt.Errorf("error querying aggregate fields: %w", err) + } + + idsResult, err := query.findIDs(ctx) + if err != nil { + return nil, fmt.Errorf("error finding IDs: %w", err) + } + + result.IDs = idsResult + return result, nil +} + +func (qb *AudioStore) queryGroupedFields(ctx context.Context, options models.AudioQueryOptions, query queryBuilder) (*models.AudioQueryResult, error) { + if !options.Count && !options.TotalDuration && !options.TotalSize { + // nothing to do - return empty result + return models.NewAudioQueryResult(qb), nil + } + + aggregateQuery := audioRepository.newQuery() + + if options.Count { + aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total") + } + + if options.TotalDuration { + query.addJoins( + join{ + table: audiosFilesTable, + onClause: "audios_files.audio_id = audios.id", + }, + join{ + 
table: audioFileTable, + onClause: "audios_files.file_id = audio_files.file_id", + }, + ) + query.addColumn("COALESCE(audio_files.duration, 0) as duration") + aggregateQuery.addColumn("SUM(temp.duration) as duration") + } + + if options.TotalSize { + query.addJoins( + join{ + table: audiosFilesTable, + onClause: "audios_files.audio_id = audios.id", + }, + join{ + table: fileTable, + onClause: "audios_files.file_id = files.id", + }, + ) + query.addColumn("COALESCE(files.size, 0) as size") + aggregateQuery.addColumn("SUM(temp.size) as size") + } + + const includeSortPagination = false + aggregateQuery.from = fmt.Sprintf("(%s) as temp", query.toSQL(includeSortPagination)) + + out := struct { + Total int + Duration null.Float + Size null.Float + }{} + if err := audioRepository.queryStruct(ctx, aggregateQuery.toSQL(includeSortPagination), query.allArgs(), &out); err != nil { + return nil, err + } + + ret := models.NewAudioQueryResult(qb) + ret.Count = out.Total + ret.TotalDuration = out.Duration.Float64 + ret.TotalSize = out.Size.Float64 + return ret, nil +} + +func (qb *AudioStore) QueryCount(ctx context.Context, audioFilter *models.AudioFilterType, findFilter *models.FindFilterType) (int, error) { + query, err := qb.makeQuery(ctx, audioFilter, findFilter) + if err != nil { + return 0, err + } + + return query.executeCount(ctx) +} + +var audioSortOptions = sortOptions{ + "bitrate", + "created_at", + "code", + "date", + "file_count", + "filesize", + "duration", + "file_mod_time", + "samplerate", + "group_audio_number", + "id", + "last_o_at", + "last_played_at", + "o_counter", + "organized", + "performer_count", + "play_count", + "play_duration", + "resume_time", + "path", + "random", + "rating", + "resolution", + "studio", + "tag_count", + "title", + "updated_at", + "performer_age", +} + +func (qb *AudioStore) setAudioSort(query *queryBuilder, findFilter *models.FindFilterType) error { + if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { + return 
nil + } + sort := findFilter.GetSort("title") + + // CVE-2024-32231 - ensure sort is in the list of allowed sorts + if err := audioSortOptions.validateSort(sort); err != nil { + return err + } + + addFileTable := func() { + query.addJoins( + join{ + sort: true, + table: audiosFilesTable, + onClause: "audios_files.audio_id = audios.id", + }, + join{ + sort: true, + table: fileTable, + onClause: "audios_files.file_id = files.id", + }, + ) + } + + addAudioFileTable := func() { + addFileTable() + query.addJoins( + join{ + sort: true, + table: audioFileTable, + onClause: "audio_files.file_id = audios_files.file_id", + }, + ) + } + + addFolderTable := func() { + query.addJoins( + join{ + sort: true, + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }, + ) + } + + direction := findFilter.GetDirection() + switch sort { + case "group_audio_number": + query.joinSort(groupsAudiosTable, "audio_group", "audios.id = audio_group.audio_id") + query.sortAndPagination += getSort("audio_index", direction, "audio_group") + case "tag_count": + query.sortAndPagination += getCountSort(audioTable, audiosTagsTable, audioIDColumn, direction) + case "performer_count": + query.sortAndPagination += getCountSort(audioTable, performersAudiosTable, audioIDColumn, direction) + case "file_count": + query.sortAndPagination += getCountSort(audioTable, audiosFilesTable, audioIDColumn, direction) + case "path": + // special handling for path + addFileTable() + addFolderTable() + query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + case "bitrate": + sort = "bit_rate" + addAudioFileTable() + query.sortAndPagination += getSort(sort, direction, audioFileTable) + case "file_mod_time": + sort = "mod_time" + addFileTable() + query.sortAndPagination += getSort(sort, direction, fileTable) + case "samplerate": + sort = "sample_rate" + addAudioFileTable() + query.sortAndPagination += getSort(sort, 
direction, audioFileTable) + case "resolution": + addAudioFileTable() + query.sortAndPagination += getSort("bit_rate", direction, audioFileTable) // NOTE(review): audio_files has no width/height columns (leftover from the video store); bit rate stands in for "resolution" — confirm intended + case "filesize": + addFileTable() + query.sortAndPagination += getSort(sort, direction, fileTable) + case "duration": + addAudioFileTable() + query.sortAndPagination += getSort(sort, direction, audioFileTable) + case "title": + addFileTable() + addFolderTable() + query.sortAndPagination += " ORDER BY COALESCE(audios.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + case "play_count": + query.sortAndPagination += getCountSort(audioTable, audiosViewDatesTable, audioIDColumn, direction) + case "last_played_at": + query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(view_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", audiosViewDatesTable, audioIDColumn, audioTable, getSortDirection(direction)) + case "last_o_at": + query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(o_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", audiosODatesTable, audioIDColumn, audioTable, getSortDirection(direction)) + case "o_counter": + query.sortAndPagination += getCountSort(audioTable, audiosODatesTable, audioIDColumn, direction) + case "performer_age": + // Looking at the youngest performer by default + aggregation := "MIN" + if direction == "DESC" { + // When sorting by performer's age DESC, I should consider the oldest performer instead + aggregation = "MAX" + } + fallback := "NULL" + if direction == "ASC" { + // When sorting ascending, NULLs are first by default. 
Coalescing to the MAX int value supported by sqlite + fallback = "9223372036854775807" + } + query.sortAndPagination += fmt.Sprintf( + " ORDER BY (SELECT COALESCE(%s(JulianDay(audios.date) - JulianDay(performers.birthdate)), %s) FROM %s as performers INNER JOIN %s AS aggregation WHERE performers.id = aggregation.%s AND aggregation.%s = %s.id) %s", + aggregation, + fallback, + performerTable, + performersAudiosTable, + performerIDColumn, + audioIDColumn, + audioTable, + getSortDirection(direction), + ) + case "studio": + query.joinSort(studioTable, "", "audios.studio_id = studios.id") + query.sortAndPagination += getSort("name", direction, studioTable) + default: + query.sortAndPagination += getSort(sort, direction, "audios") + } + + // Whatever the sorting, always use title/id as a final sort + query.sortAndPagination += ", COALESCE(audios.title, audios.id) COLLATE NATURAL_CI ASC" + + return nil +} + +func (qb *AudioStore) SaveActivity(ctx context.Context, id int, resumeTime *float64, playDuration *float64) (bool, error) { + if err := qb.tableMgr.checkIDExists(ctx, id); err != nil { + return false, err + } + + record := goqu.Record{} + + if resumeTime != nil { + record["resume_time"] = resumeTime + } + + if playDuration != nil { + record["play_duration"] = goqu.L("play_duration + ?", playDuration) + } + + if len(record) > 0 { + if err := qb.tableMgr.updateByID(ctx, id, record); err != nil { + return false, err + } + } + + return true, nil +} + +func (qb *AudioStore) ResetActivity(ctx context.Context, id int, resetResume bool, resetDuration bool) (bool, error) { + if err := qb.tableMgr.checkIDExists(ctx, id); err != nil { + return false, err + } + + record := goqu.Record{} + + if resetResume { + record["resume_time"] = 0.0 + } + + if resetDuration { + record["play_duration"] = 0.0 + } + + if len(record) > 0 { + if err := qb.tableMgr.updateByID(ctx, id, record); err != nil { + return false, err + } + } + + return true, nil +} + +func (qb *AudioStore) GetURLs(ctx 
context.Context, audioID int) ([]string, error) { + return audiosURLsTableMgr.get(ctx, audioID) +} + +func (qb *AudioStore) GetCover(ctx context.Context, audioID int) ([]byte, error) { + return qb.GetImage(ctx, audioID, audioCoverBlobColumn) +} + +func (qb *AudioStore) HasCover(ctx context.Context, audioID int) (bool, error) { + return qb.HasImage(ctx, audioID, audioCoverBlobColumn) +} + +func (qb *AudioStore) UpdateCover(ctx context.Context, audioID int, image []byte) error { + return qb.UpdateImage(ctx, audioID, audioCoverBlobColumn, image) +} + +func (qb *AudioStore) destroyCover(ctx context.Context, audioID int) error { + return qb.DestroyImage(ctx, audioID, audioCoverBlobColumn) +} + +func (qb *AudioStore) AssignFiles(ctx context.Context, audioID int, fileIDs []models.FileID) error { + // assuming a file can only be assigned to a single audio + if err := audiosFilesTableMgr.destroyJoins(ctx, fileIDs); err != nil { + return err + } + + // assign primary only if destination has no files + existingFileIDs, err := audioRepository.files.get(ctx, audioID) + if err != nil { + return err + } + + firstPrimary := len(existingFileIDs) == 0 + return audiosFilesTableMgr.insertJoins(ctx, audioID, firstPrimary, fileIDs) +} + +func (qb *AudioStore) GetGroups(ctx context.Context, id int) (ret []models.GroupsAudios, err error) { + ret = []models.GroupsAudios{} + + if err := audioRepository.groups.getAll(ctx, id, func(rows *sqlx.Rows) error { + var ms groupsAudiosRow + if err := rows.StructScan(&ms); err != nil { + return err + } + + ret = append(ret, ms.resolve(id)) + return nil + }); err != nil { + return nil, err + } + + return ret, nil +} + +func (qb *AudioStore) AddFileID(ctx context.Context, id int, fileID models.FileID) error { + const firstPrimary = false + return audiosFilesTableMgr.insertJoins(ctx, id, firstPrimary, []models.FileID{fileID}) +} + +func (qb *AudioStore) GetPerformerIDs(ctx context.Context, id int) ([]int, error) { + return 
audioRepository.performers.getIDs(ctx, id) +} + +func (qb *AudioStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { + return audioRepository.tags.getIDs(ctx, id) +} + +func (qb *AudioStore) GetGalleryIDs(ctx context.Context, id int) ([]int, error) { + return audioRepository.galleries.getIDs(ctx, id) +} + +func (qb *AudioStore) AddGalleryIDs(ctx context.Context, audioID int, galleryIDs []int) error { + return audiosGalleriesTableMgr.addJoins(ctx, audioID, galleryIDs) +} + +func (qb *AudioStore) FindDuplicates(ctx context.Context, distance int, durationDiff float64) ([][]*models.Audio, error) { + var dupeIds [][]int + if distance == 0 { + var ids []string + if err := dbWrapper.Select(ctx, &ids, findExactDuplicateQuery, durationDiff); err != nil { + return nil, err + } + + for _, id := range ids { + strIds := strings.Split(id, ",") + var audioIds []int + for _, strId := range strIds { + if intId, err := strconv.Atoi(strId); err == nil { + audioIds = sliceutil.AppendUnique(audioIds, intId) + } + } + // filter out + if len(audioIds) > 1 { + dupeIds = append(dupeIds, audioIds) + } + } + } else { + var hashes []*utils.Phash + + if err := audioRepository.queryFunc(ctx, findAllPhashesQuery, nil, false, func(rows *sqlx.Rows) error { + phash := utils.Phash{ + Bucket: -1, + Duration: -1, + } + if err := rows.StructScan(&phash); err != nil { + return err + } + + hashes = append(hashes, &phash) + return nil + }); err != nil { + return nil, err + } + + dupeIds = utils.FindDuplicates(hashes, distance, durationDiff) + } + + var duplicates [][]*models.Audio + for _, audioIds := range dupeIds { + if audios, err := qb.FindMany(ctx, audioIds); err == nil { + duplicates = append(duplicates, audios) + } + } + + sortByPath(duplicates) + + return duplicates, nil +} + +func sortByPath(audios [][]*models.Audio) { + lessFunc := func(i int, j int) bool { + firstPathI := getFirstPath(audios[i]) + firstPathJ := getFirstPath(audios[j]) + return firstPathI < firstPathJ + } + 
sort.SliceStable(audios, lessFunc) +} + +func getFirstPath(audios []*models.Audio) string { + var firstPath string + for i, audio := range audios { + if i == 0 || audio.Path < firstPath { + firstPath = audio.Path + } + } + return firstPath +} diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 7c383dc4ca..32db788b33 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -34,7 +34,7 @@ const ( cacheSizeEnv = "STASH_SQLITE_CACHE_SIZE" ) -var appSchemaVersion uint = 85 +var appSchemaVersion uint = 86 //go:embed migrations/*.sql var migrationsBox embed.FS @@ -69,6 +69,7 @@ type storeRepository struct { Blobs *BlobStore File *FileStore Folder *FolderStore + Audio *AudioStore Image *ImageStore Gallery *GalleryStore GalleryChapter *GalleryChapterStore @@ -109,6 +110,7 @@ func NewDatabase() *Database { Folder: folderStore, Scene: NewSceneStore(r, blobStore), SceneMarker: NewSceneMarkerStore(), + Audio: NewAudioStore(r), Image: NewImageStore(r), Gallery: galleryStore, GalleryChapter: NewGalleryChapterStore(), diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index b8e807e377..96a01d3885 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -1,3 +1,4 @@ +// TODO(audio): update this file package sqlite import ( @@ -20,10 +21,12 @@ import ( const ( fileTable = "files" videoFileTable = "video_files" + audioFileTable = "audio_files" imageFileTable = "image_files" fileIDColumn = "file_id" videoCaptionsTable = "video_captions" + audioCaptionsTable = "audio_captions" captionCodeColumn = "language_code" captionFilenameColumn = "filename" captionTypeColumn = "caption_type" diff --git a/pkg/sqlite/migrations/86_audio.up.sql b/pkg/sqlite/migrations/86_audio.up.sql new file mode 100644 index 0000000000..b0a044b0c7 --- /dev/null +++ b/pkg/sqlite/migrations/86_audio.up.sql @@ -0,0 +1,123 @@ +-------------------------------------------- +-- audios definition +-- +CREATE TABLE "audios" ( + `id` integer not null primary key autoincrement, + `title` 
varchar(255), + `details` text, + `date` date, + `rating` tinyint, + `studio_id` integer, + `organized` boolean not null default '0', + `created_at` datetime not null, + `updated_at` datetime not null, + `code` text, + `resume_time` float not null default 0, + `play_duration` float not null default 0, + "date_precision" TINYINT, + foreign key(`studio_id`) references `studios`(`id`) on delete + SET NULL +); +CREATE INDEX `index_audios_on_studio_id` on `audios` (`studio_id`); +-------------------------------------------- +-- audios_o_dates definition +-- +CREATE TABLE "audios_o_dates" ( + `audio_id` integer not null, + `o_date` datetime not null, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE +); +CREATE INDEX `index_audios_o_dates` ON `audios_o_dates` (`audio_id`); +-------------------------------------------- +-- audios_tags definition +-- +CREATE TABLE "audios_tags" ( + `audio_id` integer, + `tag_id` integer, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE, + foreign key(`tag_id`) references `tags`(`id`) on delete CASCADE, + PRIMARY KEY(`audio_id`, `tag_id`) +); +CREATE INDEX `index_audios_tags_on_tag_id` on `audios_tags` (`tag_id`); +-------------------------------------------- +-- audios_view_dates definition +-- +CREATE TABLE "audios_view_dates" ( + `audio_id` integer not null, + `view_date` datetime not null, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE +); +CREATE INDEX `index_audios_view_dates` ON `audios_view_dates` (`audio_id`); +-------------------------------------------- +-- groups_audios definition +-- +CREATE TABLE "groups_audios" ( + "group_id" integer, + `audio_id` integer, + `audio_index` tinyint, + foreign key("group_id") references "groups"(`id`) on delete cascade, + foreign key(`audio_id`) references `audios`(`id`) on delete cascade, + PRIMARY KEY("group_id", `audio_id`) +); +CREATE INDEX `index_group_audios_on_group_id` on "groups_audios" ("group_id"); 
+-------------------------------------------- +-- performers_audios definition +-- +CREATE TABLE "performers_audios" ( + `performer_id` integer, + `audio_id` integer, + foreign key(`performer_id`) references `performers`(`id`) on delete CASCADE, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE, + PRIMARY KEY (`audio_id`, `performer_id`) +); +CREATE INDEX `index_performers_audios_on_performer_id` on `performers_audios` (`performer_id`); +-------------------------------------------- +-- audio_custom_fields definition +-- +CREATE TABLE `audio_custom_fields` ( + `audio_id` integer NOT NULL, + `field` varchar(64) NOT NULL, + `value` BLOB NOT NULL, + PRIMARY KEY (`audio_id`, `field`), + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE +); +CREATE INDEX `index_audio_custom_fields_field_value` ON `audio_custom_fields` (`field`, `value`); +-------------------------------------------- +-- audio_urls definition +-- +CREATE TABLE `audio_urls` ( + `audio_id` integer NOT NULL, + `position` integer NOT NULL, + `url` varchar(255) NOT NULL, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE, + PRIMARY KEY(`audio_id`, `position`, `url`) +); +CREATE INDEX `audio_urls_url` on `audio_urls` (`url`); +-------------------------------------------- +-- audios_files definition +-- +CREATE TABLE `audios_files` ( + `audio_id` integer NOT NULL, + `file_id` integer NOT NULL, + `primary` boolean NOT NULL, + foreign key(`audio_id`) references `audios`(`id`) on delete CASCADE, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE, + PRIMARY KEY(`audio_id`, `file_id`) +); +CREATE INDEX `index_audios_files_file_id` ON `audios_files` (`file_id`); +CREATE UNIQUE INDEX `unique_index_audios_files_on_primary` on `audios_files` (`audio_id`) +WHERE `primary` = 1; +-------------------------------------------- +-- audio_files definition +-- + +-- TODO: think of better name for this, too close to `audios_files` +CREATE TABLE `audio_files` 
( + `file_id` integer NOT NULL primary key, + `duration` float NOT NULL, + `format` varchar(255) NOT NULL, + `audio_codec` varchar(255) NOT NULL, + `sample_rate` float NOT NULL, + `bit_rate` integer NOT NULL, + foreign key(`file_id`) references `files`(`id`) on delete CASCADE +); \ No newline at end of file diff --git a/pkg/sqlite/tables.go b/pkg/sqlite/tables.go index 4c09113f0c..825d0c2976 100644 --- a/pkg/sqlite/tables.go +++ b/pkg/sqlite/tables.go @@ -257,6 +257,11 @@ var ( idColumn: goqu.T(videoFileTable).Col(fileIDColumn), } + audioFileTableMgr = &table{ + table: goqu.T(audioFileTable), + idColumn: goqu.T(audioFileTable).Col(fileIDColumn), + } + imageFileTableMgr = &table{ table: goqu.T(imageFileTable), idColumn: goqu.T(imageFileTable).Col(fileIDColumn),