feat: add support for pcap replay

This commit is contained in:
eddc005 2024-05-06 20:28:36 +01:00
parent 5723490a6c
commit 94387450cf
5 changed files with 176 additions and 10 deletions

View File

@ -43,6 +43,7 @@ var logger *zap.Logger
// Flags // Flags
var ( var (
cfgFile string cfgFile string
pcapFile string
logLevel string logLevel string
logFormat string logFormat string
) )
@ -118,6 +119,7 @@ func init() {
func initFlags() { func initFlags() {
rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file") rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file")
rootCmd.PersistentFlags().StringVarP(&pcapFile, "pcap", "p", "", "pcap file (optional)")
rootCmd.PersistentFlags().StringVarP(&logLevel, "log-level", "l", envOrDefaultString(appLogLevelEnv, "info"), "log level") rootCmd.PersistentFlags().StringVarP(&logLevel, "log-level", "l", envOrDefaultString(appLogLevelEnv, "info"), "log level")
rootCmd.PersistentFlags().StringVarP(&logFormat, "log-format", "f", envOrDefaultString(appLogFormatEnv, "console"), "log format") rootCmd.PersistentFlags().StringVarP(&logFormat, "log-format", "f", envOrDefaultString(appLogFormatEnv, "console"), "log format")
} }
@ -133,6 +135,9 @@ func initConfig() {
viper.AddConfigPath("$HOME/.opengfw") viper.AddConfigPath("$HOME/.opengfw")
viper.AddConfigPath("/etc/opengfw") viper.AddConfigPath("/etc/opengfw")
} }
viper.SetDefault("replay.realtime", true)
viper.SetDefault("replay.replayDelay", 10 * time.Millisecond)
} }
func initLogger() { func initLogger() {
@ -167,6 +172,7 @@ type cliConfig struct {
IO cliConfigIO `mapstructure:"io"` IO cliConfigIO `mapstructure:"io"`
Workers cliConfigWorkers `mapstructure:"workers"` Workers cliConfigWorkers `mapstructure:"workers"`
Ruleset cliConfigRuleset `mapstructure:"ruleset"` Ruleset cliConfigRuleset `mapstructure:"ruleset"`
Replay cliConfigReplay `mapstructure:"replay"`
} }
type cliConfigIO struct { type cliConfigIO struct {
@ -177,6 +183,11 @@ type cliConfigIO struct {
RST bool `mapstructure:"rst"` RST bool `mapstructure:"rst"`
} }
type cliConfigReplay struct {
Realtime bool `mapstructure:"realtime"`
ReplayDelay time.Duration `mapstructure:"replayDelay"`
}
type cliConfigWorkers struct { type cliConfigWorkers struct {
Count int `mapstructure:"count"` Count int `mapstructure:"count"`
QueueSize int `mapstructure:"queueSize"` QueueSize int `mapstructure:"queueSize"`
@ -197,17 +208,31 @@ func (c *cliConfig) fillLogger(config *engine.Config) error {
} }
func (c *cliConfig) fillIO(config *engine.Config) error { func (c *cliConfig) fillIO(config *engine.Config) error {
nfio, err := io.NewNFQueuePacketIO(io.NFQueuePacketIOConfig{ var ioImpl io.PacketIO
QueueSize: c.IO.QueueSize, var err error
ReadBuffer: c.IO.ReadBuffer, if pcapFile != "" {
WriteBuffer: c.IO.WriteBuffer, // Setup IO for pcap file replay
Local: c.IO.Local, logger.Info("replaying from pcap file", zap.String("pcap file", pcapFile))
RST: c.IO.RST, ioImpl, err = io.NewPcapPacketIO(io.PcapPacketIOConfig{
}) PcapFile: pcapFile,
Realtime: c.Replay.Realtime,
ReplayDelay: c.Replay.ReplayDelay,
})
} else {
// Setup IO for nfqueue
ioImpl, err = io.NewNFQueuePacketIO(io.NFQueuePacketIOConfig{
QueueSize: c.IO.QueueSize,
ReadBuffer: c.IO.ReadBuffer,
WriteBuffer: c.IO.WriteBuffer,
Local: c.IO.Local,
RST: c.IO.RST,
})
}
if err != nil { if err != nil {
return configError{Field: "io", Err: err} return configError{Field: "io", Err: err}
} }
config.IO = nfio config.IO = ioImpl
return nil return nil
} }

View File

@ -58,12 +58,17 @@ func (e *engine) UpdateRuleset(r ruleset.Ruleset) error {
} }
func (e *engine) Run(ctx context.Context) error { func (e *engine) Run(ctx context.Context) error {
workerCtx, workerCancel := context.WithCancel(ctx)
defer workerCancel() // Stop workers
// Register IO shutdown
ioCtx, ioCancel := context.WithCancel(ctx) ioCtx, ioCancel := context.WithCancel(ctx)
defer ioCancel() // Stop workers & IO e.io.SetCancelFunc(ioCancel)
defer ioCancel() // Stop IO
// Start workers // Start workers
for _, w := range e.workers { for _, w := range e.workers {
go w.Run(ioCtx) go w.Run(workerCtx)
} }
// Register IO callback // Register IO callback
@ -85,6 +90,8 @@ func (e *engine) Run(ctx context.Context) error {
return err return err
case <-ctx.Done(): case <-ctx.Done():
return nil return nil
case <-ioCtx.Done():
return nil
} }
} }

View File

@ -48,6 +48,9 @@ type PacketIO interface {
ProtectedDialContext(ctx context.Context, network, address string) (net.Conn, error) ProtectedDialContext(ctx context.Context, network, address string) (net.Conn, error)
// Close closes the packet IO. // Close closes the packet IO.
Close() error Close() error
// SetCancelFunc gives packet IO access to context cancel function, enabling it to
// trigger a shutdown
SetCancelFunc(cancelFunc context.CancelFunc) error
} }
type ErrInvalidPacket struct { type ErrInvalidPacket struct {

View File

@ -281,6 +281,11 @@ func (n *nfqueuePacketIO) Close() error {
return n.n.Close() return n.n.Close()
} }
// nfqueue IO does not issue shutdown
func (n *nfqueuePacketIO) SetCancelFunc(cancelFunc context.CancelFunc) error {
return nil
}
func (n *nfqueuePacketIO) setupNft(local, rst, remove bool) error { func (n *nfqueuePacketIO) setupNft(local, rst, remove bool) error {
rules, err := generateNftRules(local, rst) rules, err := generateNftRules(local, rst)
if err != nil { if err != nil {

126
io/pcap.go Normal file
View File

@ -0,0 +1,126 @@
package io
import (
	"context"
	"errors"
	"hash/crc32"
	"net"
	"sort"
	"strings"
	"time"

	"github.com/google/gopacket"
	"github.com/google/gopacket/pcap"
)
// Compile-time check that pcapPacketIO satisfies the PacketIO interface.
var _ PacketIO = (*pcapPacketIO)(nil)

// pcapPacketIO replays packets from a capture file instead of reading
// them from a live packet source.
type pcapPacketIO struct {
	pcap     *pcap.Handle       // offline handle for the capture file
	lastTime *time.Time         // timestamp of the previously replayed packet; nil before the first
	ioCancel context.CancelFunc // engine shutdown hook, installed via SetCancelFunc
	config   PcapPacketIOConfig // replay settings (file path, pacing mode)
}
// PcapPacketIOConfig holds the settings for pcap file replay.
type PcapPacketIOConfig struct {
	PcapFile    string        // path of the capture file to replay
	Realtime    bool          // if true, pace replay by the capture timestamps; otherwise use ReplayDelay
	ReplayDelay time.Duration // fixed delay between packets when Realtime is false
}
// NewPcapPacketIO opens the capture file named in config for offline
// reading and returns a PacketIO that replays its packets.
// It returns an error if the file cannot be opened or parsed.
func NewPcapPacketIO(config PcapPacketIOConfig) (PacketIO, error) {
	handle, err := pcap.OpenOffline(config.PcapFile)
	if err != nil {
		return nil, err
	}
	// lastTime and ioCancel intentionally start as their zero values:
	// lastTime is nil until the first packet, ioCancel is installed
	// later via SetCancelFunc.
	return &pcapPacketIO{
		pcap:   handle,
		config: config,
	}, nil
}
// Register starts a background goroutine that replays every packet from
// the capture file, invoking cb once per packet that carries both a link
// and a network layer. Replay is paced by wait(). When the file is
// exhausted, the goroutine sleeps briefly to let workers drain and then
// shuts the engine down through the cancel function installed via
// SetCancelFunc. The goroutine stops early if ctx is cancelled.
func (p *pcapPacketIO) Register(ctx context.Context, cb PacketCallback) error {
	go func() {
		packetSource := gopacket.NewPacketSource(p.pcap, p.pcap.LinkType())
		for packet := range packetSource.Packets() {
			// Stop replaying promptly if the engine is shutting down.
			if ctx.Err() != nil {
				return
			}
			p.wait(packet)
			networkLayer := packet.NetworkLayer()
			if networkLayer == nil {
				continue
			}
			linkLayer := packet.LinkLayer()
			if linkLayer == nil {
				// Without a link layer there is no payload to hand
				// to the workers; skip instead of panicking.
				continue
			}
			// Derive a direction-independent stream ID by hashing the
			// sorted endpoint pair, so both directions of a flow map
			// to the same stream.
			src, dst := networkLayer.NetworkFlow().Endpoints()
			endpoints := []string{src.String(), dst.String()}
			sort.Strings(endpoints)
			id := crc32.Checksum([]byte(strings.Join(endpoints, ",")), crc32.IEEETable)
			cb(&pcapPacket{
				streamID: id,
				data:     linkLayer.LayerPayload(),
			}, nil)
		}
		// Give the workers a chance to finish everything
		time.Sleep(time.Second)
		// Stop the engine when all packets are finished. Guard against
		// SetCancelFunc never having been called.
		if p.ioCancel != nil {
			p.ioCancel()
		}
	}()
	return nil
}
// ProtectedDialContext is not supported in pcap replay mode: there is no
// live network to dial out on. Returning an explicit error avoids handing
// callers a nil net.Conn alongside a nil error, which would make any use
// of the connection panic.
func (p *pcapPacketIO) ProtectedDialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return nil, errors.New("dialing is not supported in pcap replay mode")
}
// SetVerdict is a no-op for pcap replay: packets come from a file, so
// there is no live traffic to accept, drop, or rewrite.
func (p *pcapPacketIO) SetVerdict(pkt Packet, v Verdict, newPacket []byte) error {
	return nil
}
// SetCancelFunc stores the engine's context cancel function so that the
// replay goroutine in Register can shut the engine down once the capture
// file has been fully replayed.
func (p *pcapPacketIO) SetCancelFunc(cancelFunc context.CancelFunc) error {
	p.ioCancel = cancelFunc
	return nil
}
// Close releases the underlying pcap handle. The original implementation
// leaked the handle by never closing it.
func (p *pcapPacketIO) Close() error {
	p.pcap.Close()
	return nil
}
// wait intentionally slows down the replay before each packet is
// delivered. In realtime mode it sleeps for the gap between this
// packet's capture timestamp and the previous one, matching the original
// traffic timing; otherwise it sleeps a fixed ReplayDelay to avoid
// flooding the workers. Always returns nil.
func (p *pcapPacketIO) wait(packet gopacket.Packet) error {
	// Fixed-delay pacing when not replaying in realtime.
	if !p.config.Realtime {
		time.Sleep(p.config.ReplayDelay)
		return nil
	}
	ts := packet.Metadata().Timestamp
	if p.lastTime != nil {
		// Reproduce the inter-packet gap from the capture.
		time.Sleep(ts.Sub(*p.lastTime))
	}
	p.lastTime = &ts
	return nil
}
// Compile-time check that pcapPacket satisfies the Packet interface.
var _ Packet = (*pcapPacket)(nil)

// pcapPacket is a Packet backed by a single frame read from the capture file.
type pcapPacket struct {
	streamID uint32 // direction-independent flow ID derived from the endpoint pair
	data     []byte // link-layer payload of the captured frame
}
// StreamID returns the flow identifier assigned to this packet.
func (p *pcapPacket) StreamID() uint32 {
	return p.streamID
}
// Data returns the packet's link-layer payload bytes.
func (p *pcapPacket) Data() []byte {
	return p.data
}