refactor and move to go mod

This commit is contained in:
YouROK
2021-02-18 16:56:55 +03:00
parent 0e49a98626
commit 94f212fa75
50 changed files with 13 additions and 29 deletions

View File

@@ -0,0 +1,219 @@
package torrstor
import (
"sort"
"sync"

"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/storage"

"server/log"
"server/settings"
"server/torr/storage/state"
"server/torr/utils"
)
type Cache struct {
storage.TorrentImpl
storage *Storage
capacity int64
filled int64
hash metainfo.Hash
pieceLength int64
pieceCount int
pieces map[int]*Piece
readers map[*Reader]struct{}
muReaders sync.Mutex
isRemove bool
muRemove sync.Mutex
torrent *torrent.Torrent
}
func NewCache(capacity int64, storage *Storage) *Cache {
ret := &Cache{
capacity: capacity,
filled: 0,
pieces: make(map[int]*Piece),
storage: storage,
readers: make(map[*Reader]struct{}),
}
return ret
}
func (c *Cache) Init(info *metainfo.Info, hash metainfo.Hash) {
log.TLogln("Create cache for:", info.Name, hash.HexString())
if c.capacity == 0 {
c.capacity = info.PieceLength * 4
}
c.pieceLength = info.PieceLength
c.pieceCount = info.NumPieces()
c.hash = hash
for i := 0; i < c.pieceCount; i++ {
c.pieces[i] = &Piece{
Id: i,
Length: info.Piece(i).Length(),
Hash: info.Piece(i).Hash().HexString(),
cache: c,
}
}
}
func (c *Cache) SetTorrent(torr *torrent.Torrent) {
c.torrent = torr
}
func (c *Cache) Piece(m metainfo.Piece) storage.PieceImpl {
if val, ok := c.pieces[m.Index()]; ok {
return val
}
return nil
}
func (c *Cache) Close() error {
log.TLogln("Close cache for:", c.hash)
if _, ok := c.storage.caches[c.hash]; ok {
delete(c.storage.caches, c.hash)
}
c.pieces = nil
c.muReaders.Lock()
c.readers = nil
c.muReaders.Unlock()
utils.FreeOSMemGC()
return nil
}
func (c *Cache) removePiece(piece *Piece) {
piece.Release()
utils.FreeOSMemGC()
}
func (c *Cache) AdjustRA(readahead int64) {
if settings.BTsets.CacheSize == 0 {
c.capacity = readahead * 3
}
c.muReaders.Lock()
for r := range c.readers {
r.SetReadahead(readahead)
}
c.muReaders.Unlock()
}
func (c *Cache) GetState() *state.CacheState {
cState := new(state.CacheState)
piecesState := make(map[int]state.ItemState, 0)
var fill int64 = 0
for _, p := range c.pieces {
if p.Size > 0 {
fill += p.Length
piecesState[p.Id] = state.ItemState{
Id: p.Id,
Size: p.Size,
Length: p.Length,
Completed: p.complete,
}
}
}
readersState := make([]*state.ReaderState, 0)
c.muReaders.Lock()
for r := range c.readers {
rng := r.getPiecesRange()
pc := r.getReaderPiece()
readersState = append(readersState, &state.ReaderState{
Start: rng.Start,
End: rng.End,
Reader: pc,
})
}
c.muReaders.Unlock()
c.filled = fill
cState.Capacity = c.capacity
cState.PiecesLength = c.pieceLength
cState.PiecesCount = c.pieceCount
cState.Hash = c.hash.HexString()
cState.Filled = fill
cState.Pieces = piecesState
cState.Readers = readersState
return cState
}
func (c *Cache) cleanPieces() {
if c.isRemove {
return
}
c.muRemove.Lock()
if c.isRemove {
c.muRemove.Unlock()
return
}
c.isRemove = true
defer func() { c.isRemove = false }()
c.muRemove.Unlock()
remPieces := c.getRemPieces()
if c.filled > c.capacity {
rems := (c.filled - c.capacity) / c.pieceLength
for _, p := range remPieces {
c.removePiece(p)
rems--
if rems <= 0 {
break
}
}
}
}
func (c *Cache) getRemPieces() []*Piece {
piecesRemove := make([]*Piece, 0)
fill := int64(0)
ranges := make([]Range, 0)
c.muReaders.Lock()
for r := range c.readers {
ranges = append(ranges, r.getPiecesRange())
}
c.muReaders.Unlock()
ranges = mergeRange(ranges)
for id, p := range c.pieces {
if p.Size > 0 {
fill += p.Size
}
if len(ranges) > 0 {
if !inRanges(ranges, id) {
piece := c.torrent.Piece(id)
if piece.State().Priority != torrent.PiecePriorityNone {
piece.SetPriority(torrent.PiecePriorityNone)
}
if p.Size > 0 {
piecesRemove = append(piecesRemove, p)
}
}
} else {
piece := c.torrent.Piece(id)
if piece.State().Priority != torrent.PiecePriorityNone {
piece.SetPriority(torrent.PiecePriorityNone)
}
}
}
sort.Slice(piecesRemove, func(i, j int) bool {
return piecesRemove[i].accessed < piecesRemove[j].accessed
})
c.filled = fill
return piecesRemove
}
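For illustration only, with assumed numbers that are not from this commit: if filled is 300 MiB, capacity is 200 MiB and pieceLength is 4 MiB, cleanPieces computes rems = (300 MiB - 200 MiB) / 4 MiB = 25 and removes the 25 least recently accessed pieces that getRemPieces reports as lying outside the merged reader ranges.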

View File

@@ -0,0 +1,106 @@
package torrstor
import (
"errors"
"io"
"sync"
"time"
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/storage"
)
type Piece struct {
storage.PieceImpl
Id int
Hash string
Length int64
Size int64
complete bool
readed bool
accessed int64
buffer []byte
mu sync.RWMutex
cache *Cache
}
func (p *Piece) WriteAt(b []byte, off int64) (n int, err error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.buffer == nil {
go p.cache.cleanPieces()
p.buffer = make([]byte, p.cache.pieceLength)
}
n = copy(p.buffer[off:], b)
p.Size += int64(n)
p.accessed = time.Now().Unix()
return
}
func (p *Piece) ReadAt(b []byte, off int64) (n int, err error) {
p.mu.RLock()
defer p.mu.RUnlock()
size := len(b)
if size+int(off) > len(p.buffer) {
size = len(p.buffer) - int(off)
if size < 0 {
size = 0
}
}
if len(p.buffer) < int(off) || len(p.buffer) < int(off)+size {
return 0, io.EOF
}
n = copy(b, p.buffer[int(off) : int(off)+size])
p.accessed = time.Now().Unix()
if int(off)+size >= len(p.buffer) {
p.readed = true
}
if int64(len(b))+off >= p.Size {
go p.cache.cleanPieces()
}
if n == 0 && err == nil {
return 0, io.EOF
}
return n, nil
}
func (p *Piece) MarkComplete() error {
if len(p.buffer) == 0 {
return errors.New("piece is not complete")
}
p.complete = true
return nil
}
func (p *Piece) MarkNotComplete() error {
p.complete = false
return nil
}
func (p *Piece) Completion() storage.Completion {
return storage.Completion{
Complete: p.complete && len(p.buffer) > 0,
Ok: true,
}
}
func (p *Piece) Release() {
p.mu.Lock()
defer p.mu.Unlock()
if p.buffer != nil {
p.buffer = nil
}
p.Size = 0
p.complete = false
// Workaround so the engine realizes the piece is gone; sometimes it re-downloads it again, not sure why
pce := p.cache.torrent.Piece(p.Id)
pce.SetPriority(torrent.PiecePriorityNone)
pce.UpdateCompletion()
pce.SetPriority(torrent.PiecePriorityNone)
}

View File

@@ -0,0 +1,49 @@
package torrstor
import (
"sort"
)
type Range struct {
Start, End int
}
func inRanges(ranges []Range, ind int) bool {
for _, r := range ranges {
if ind >= r.Start && ind <= r.End {
return true
}
}
return false
}
func mergeRange(ranges []Range) []Range {
if len(ranges) <= 1 {
return ranges
}
// copy ranges
merged := append([]Range(nil), ranges...)
sort.Slice(merged, func(i, j int) bool {
if merged[i].Start < merged[j].Start {
return true
}
if merged[i].Start == merged[j].Start && merged[i].End < merged[j].End {
return true
}
return false
})
j := 0
for i := 1; i < len(merged); i++ {
if merged[j].End >= merged[i].Start {
if merged[j].End < merged[i].End {
merged[j].End = merged[i].End
}
} else {
j++
merged[j] = merged[i]
}
}
return merged[:j+1]
}
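A minimal test-style sketch for the helpers above (hypothetical, not part of this commit): overlapping ranges collapse into one entry, and inRanges then answers piece membership against the merged set.

// range_example_test.go: hypothetical sketch, not included in this commit
package torrstor

import "testing"

func TestMergeRangeSketch(t *testing.T) {
	// {10,20} and {15,30} overlap and collapse; {40,50} stays separate.
	merged := mergeRange([]Range{{Start: 10, End: 20}, {Start: 15, End: 30}, {Start: 40, End: 50}})
	if len(merged) != 2 || merged[0] != (Range{10, 30}) || merged[1] != (Range{40, 50}) {
		t.Fatalf("unexpected merge result: %v", merged)
	}
	if !inRanges(merged, 25) || inRanges(merged, 35) {
		t.Fatalf("unexpected inRanges membership")
	}
}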

View File

@@ -0,0 +1,114 @@
package torrstor
import (
"io"
"sync"
"github.com/anacrolix/torrent"
"server/log"
)
type Reader struct {
torrent.Reader
offset int64
readahead int64
file *torrent.File
cache *Cache
isClosed bool
// Preload
muPreload sync.Mutex
}
func newReader(file *torrent.File, cache *Cache) *Reader {
r := new(Reader)
r.file = file
r.Reader = file.NewReader()
r.SetReadahead(0)
r.cache = cache
cache.muReaders.Lock()
cache.readers[r] = struct{}{}
cache.muReaders.Unlock()
return r
}
func (r *Reader) Seek(offset int64, whence int) (n int64, err error) {
if r.isClosed {
return 0, io.EOF
}
switch whence {
case io.SeekStart:
r.offset = offset
case io.SeekCurrent:
r.offset += offset
case io.SeekEnd:
r.offset = r.file.Length() + offset
}
n, err = r.Reader.Seek(offset, whence)
r.offset = n
return
}
func (r *Reader) Read(p []byte) (n int, err error) {
err = io.EOF
if r.isClosed {
return
}
if r.file.Torrent() != nil && r.file.Torrent().Info() != nil {
n, err = r.Reader.Read(p)
r.offset += int64(n)
go r.preload()
} else {
log.TLogln("Torrent closed and readed")
}
return
}
func (r *Reader) SetReadahead(length int64) {
r.Reader.SetReadahead(length)
r.readahead = length
}
func (r *Reader) Offset() int64 {
return r.offset
}
func (r *Reader) Readahead() int64 {
return r.readahead
}
func (r *Reader) Close() {
// the underlying file reader is closed by gotorrent
// this struct is removed from the cache in CloseReader
r.isClosed = true
if len(r.file.Torrent().Files()) > 0 {
r.Reader.Close()
}
go r.cache.getRemPieces()
}
func (c *Cache) NewReader(file *torrent.File) *Reader {
return newReader(file, c)
}
func (c *Cache) Readers() int {
if c == nil {
return 0
}
c.muReaders.Lock()
defer c.muReaders.Unlock()
if c.readers == nil {
return 0
}
return len(c.readers)
}
func (c *Cache) CloseReader(r *Reader) {
r.cache.muReaders.Lock()
r.Close()
delete(r.cache.readers, r)
r.cache.muReaders.Unlock()
}
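A hypothetical lifecycle sketch for the reader API above (not part of this commit; the torrstor import path is an assumption): obtain a Reader from the cache, read through the file, then release it via CloseReader.

package torrexample

import (
	"github.com/anacrolix/torrent"

	"server/torr/storage/torrstor" // assumed import path
)

// streamFile shows the intended Reader lifecycle; error handling is omitted.
func streamFile(cache *torrstor.Cache, file *torrent.File) {
	r := cache.NewReader(file)
	defer cache.CloseReader(r)

	buf := make([]byte, 32*1024)
	for {
		n, err := r.Read(buf)
		_ = n // a real handler would copy buf[:n] to the HTTP response
		if err != nil {
			break
		}
	}
}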

View File

@@ -0,0 +1,54 @@
package torrstor
import (
"github.com/anacrolix/torrent"
"server/settings"
)
func (r *Reader) getPiecesRange() Range {
startOff, endOff := r.getOffsetRange()
return Range{r.getPieceNum(startOff), r.getPieceNum(endOff)}
}
func (r *Reader) getReaderPiece() int {
readerOff := r.offset
return r.getPieceNum(readerOff)
}
func (r *Reader) getPieceNum(offset int64) int {
return int((offset + r.file.Offset()) / r.cache.pieceLength)
}
func (r *Reader) getOffsetRange() (int64, int64) {
prc := int64(settings.BTsets.ReaderReadAHead)
readers := int64(len(r.cache.readers))
if readers == 0 {
readers = 1
}
beginOffset := r.offset - (r.cache.capacity/readers)*(100-prc)/100
endOffset := r.offset + (r.cache.capacity/readers)*prc/100
if beginOffset < 0 {
beginOffset = 0
}
if endOffset > r.file.Length() {
endOffset = r.file.Length()
}
return beginOffset, endOffset
}
func (r *Reader) preload() {
torr := r.file.Torrent()
rrange := r.getPiecesRange()
rahPiece := int(r.readahead / torr.Info().PieceLength)
readerPiece := r.getReaderPiece()
// from reader readahead to end of range
for i := readerPiece + rahPiece; i < rrange.End; i++ {
if torr.Piece(i).State().Priority == torrent.PiecePriorityNone {
torr.Piece(i).SetPriority(torrent.PiecePriorityNormal)
}
}
}
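A small worked sketch of the window math in getOffsetRange, with assumed values that are not taken from this commit: given a 200 MiB cache, a single reader and ReaderReadAHead = 95, the window reaches about 10 MiB behind the reader offset and 190 MiB ahead of it before clamping to the file bounds.

package main

import "fmt"

// All numbers here are hypothetical; they only illustrate the arithmetic.
func main() {
	capacity := int64(200 << 20) // assumed cache capacity: 200 MiB
	prc := int64(95)             // assumed settings.BTsets.ReaderReadAHead
	readers := int64(1)          // one active reader
	offset := int64(500 << 20)   // assumed current reader offset

	begin := offset - (capacity/readers)*(100-prc)/100
	end := offset + (capacity/readers)*prc/100
	fmt.Println((offset-begin)>>20, (end-offset)>>20) // prints: 10 190 (MiB behind / ahead)
}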

View File

@@ -0,0 +1,64 @@
package torrstor
import (
"sync"

"github.com/anacrolix/torrent/metainfo"
storage2 "github.com/anacrolix/torrent/storage"

"server/torr/storage"
)
type Storage struct {
storage.Storage
caches map[metainfo.Hash]*Cache
capacity int64
mu sync.Mutex
}
func NewStorage(capacity int64) *Storage {
stor := new(Storage)
stor.capacity = capacity
stor.caches = make(map[metainfo.Hash]*Cache)
return stor
}
func (s *Storage) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage2.TorrentImpl, error) {
s.mu.Lock()
defer s.mu.Unlock()
ch := NewCache(s.capacity, s)
ch.Init(info, infoHash)
s.caches[infoHash] = ch
return ch, nil
}
func (s *Storage) CloseHash(hash metainfo.Hash) {
if s.caches == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if ch, ok := s.caches[hash]; ok {
ch.Close()
delete(s.caches, hash)
}
}
func (s *Storage) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
for _, ch := range s.caches {
ch.Close()
}
return nil
}
func (s *Storage) GetCache(hash metainfo.Hash) *Cache {
s.mu.Lock()
defer s.mu.Unlock()
if cache, ok := s.caches[hash]; ok {
return cache
}
return nil
}
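A hypothetical wiring sketch (not part of this commit): Storage implements OpenTorrent and Close, so it can plausibly be passed as the anacrolix/torrent client's DefaultStorage; the torrstor import path and the 96 MiB capacity are assumptions.

package main

import (
	"log"

	"github.com/anacrolix/torrent"

	"server/torr/storage/torrstor" // assumed import path
)

func main() {
	cfg := torrent.NewDefaultClientConfig()
	cfg.DefaultStorage = torrstor.NewStorage(96 << 20) // assumed 96 MiB cache

	client, err := torrent.NewClient(cfg)
	if err != nil {
		log.Fatalln("start torrent client:", err)
	}
	defer client.Close()
}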