restic/rclone: PROPFIND für Dateien, MKCOL-Fix, Logging, Cache

- PROPFIND auf Dateipfade (rclone-Verifizierung nach PUT)
- MKCOL: 'already exists' -> 201 statt 500
- resolveResource: name.bin-Fallback für Dateien ohne Erweiterung
- recentFileCache für neu erstellte Dateien (API-Verzögerung)
- Logging: webdav-debug.log, webdav-errors.log, REQ/RES
- start-webdav.cmd: Log-Ausgabe in Datei, PORT aus .env
- Troubleshooting-Doku für restic 500-Fehler

Made-with: Cursor
This commit is contained in:
2026-02-28 16:11:22 +01:00
parent bbf3b899f7
commit b463579896
5 changed files with 153 additions and 26 deletions

View File

@@ -9,7 +9,7 @@ CRYPTO_SECRET=6KYQBP847D4ATSFA
# Für Namensentschlüsselung (CRYPTO_SECRET2). Falls nicht gesetzt, wird CRYPTO_SECRET verwendet.
# CRYPTO_SECRET2=6KYQBP847D4ATSFA
# DEBUG=1 # Salt-Decryption testen (ob CRYPTO_SECRET stimmt)
# DEBUG=1 # Salt-Decryption testen; PUT-Logging (Pfad, Body-Größe, Stacktrace bei Fehlern)
# Browser-Token (für token-test.js und WebDAV) aus drive.internxt.com localStorage
# INXT_TOKEN= # xNewToken

View File

@@ -89,6 +89,13 @@ restic -r rclone:internxt-webdav:repo-name init
Der Server erstellt fehlende Ordner rekursiv (MKCOL). Bei 500-Fehlern: Server-Log prüfen (`PUT Fehler:`), Token mit `npm run token-refresh` erneuern.
### Restic „object not found“ / 500
1. **Port prüfen:** rclone-URL muss exakt dem Server-Port entsprechen. Steht in der Konsole z.B. `http://127.0.0.1:3010`, dann in rclone `url = http://127.0.0.1:3010` eintragen.
2. **Nur einen Server:** `npm start` beenden (Ctrl+C), dann nur `scripts\start-webdav.cmd` nutzen – sonst antwortet evtl. ein alter Prozess.
3. **rclone config:** `rclone config` → Remote `internxt-webdav` → `url` = `http://127.0.0.1:PORT` (PORT aus Server-Start).
4. **Logs:** `logs\webdav-errors.log` und `logs\webdav-debug.log` prüfen – dort steht, welche Anfrage 4xx/5xx bekommt.
## WebDAV-Credentials (für Duplicati, Explorer)
Der Server erwartet **Basic Auth**. Ohne `WEBDAV_USER`/`WEBDAV_PASS` in `.env` akzeptiert er **beliebige** Credentials – Sie können in Duplicati z.B. Benutzername `backup` und Passwort `geheim` eintragen. Mit `WEBDAV_USER` und `WEBDAV_PASS` werden nur diese Credentials akzeptiert.

View File

@@ -4,9 +4,9 @@ REM In Duplicati: Einstellungen -> Erweitert -> Scripts -> Vor dem Backup ausfue
REM Pfad: C:\Pfad\zu\internxt-webdav\scripts\start-webdav.cmd
REM Optional: Port als Argument (z.B. start-webdav.cmd 8080)
if "%1"=="" (set PORT=3005) else (set PORT=%1)
cd /d "%~dp0.."
if "%1"=="" (set PORT=3005) else (set PORT=%1)
for /f "tokens=2 delims==" %%a in ('findstr /B "PORT=" .env 2^>nul') do set PORT=%%a
REM .env und Token pruefen
if not exist .env (
@@ -26,8 +26,11 @@ if %errorlevel% equ 0 (
exit /b 0
)
echo Starte WebDAV-Server...
start /B node src/server.js > nul 2>&1
if not exist "%~dp0..\logs" mkdir "%~dp0..\logs"
set LOGFILE=%~dp0..\logs\webdav.log
echo [%date% %time%] WebDAV-Server starten... >> "%LOGFILE%"
echo Starte WebDAV-Server... Log: %LOGFILE%
start /B node src/server.js >> "%LOGFILE%" 2>&1
REM Warten und pruefen ob Server antwortet (OPTIONS braucht keine Auth)
set RETRIES=0

View File

@@ -107,6 +107,7 @@ export async function resolveResource(storage, rootFolderUuid, path) {
const bucket = file.bucket ?? file.bucket_id;
const fileId = file.fileId ?? file.file_id ?? file.networkFileId;
const name = getPlainName(file.name, file.plain_name ?? file.plainName, null, file.folder_id ?? file.folderId);
const size = file.size ?? file.file_size ?? 0;
return {
uuid: file.uuid,
type: 'file',
@@ -114,6 +115,7 @@ export async function resolveResource(storage, rootFolderUuid, path) {
parentUuid: parent.uuid,
bucket,
fileId,
size,
};
}
}
@@ -127,14 +129,21 @@ export async function resolveResource(storage, rootFolderUuid, path) {
return { uuid: folder.uuid, type: 'folder', name, parentUuid: parent.uuid };
}
const file = content?.files?.find((f) => {
let file = content?.files?.find((f) => {
const name = getPlainName(f.name, f.plain_name ?? f.plainName, null, f.folder_id ?? f.folderId);
return sanitize(name).toLowerCase() === sanitize(childName).toLowerCase();
});
if (!file && !childName.includes('.')) {
file = content?.files?.find((f) => {
const name = getPlainName(f.name, f.plain_name ?? f.plainName, null, f.folder_id ?? f.folderId);
return sanitize(name).toLowerCase() === sanitize(childName + '.bin').toLowerCase();
});
}
if (file) {
const bucket = file.bucket ?? file.bucket_id;
const fileId = file.fileId ?? file.file_id ?? file.networkFileId;
const name = getPlainName(file.name, file.plain_name ?? file.plainName, null, file.folder_id ?? file.folderId);
const size = file.size ?? file.file_size ?? 0;
return {
uuid: file.uuid,
type: 'file',
@@ -142,6 +151,7 @@ export async function resolveResource(storage, rootFolderUuid, path) {
parentUuid: parent.uuid,
bucket,
fileId,
size,
};
}

View File

@@ -6,6 +6,8 @@
*/
import 'dotenv/config';
import fs from 'fs';
import path from 'path';
import express from 'express';
import { createClients, refreshUser } from './internxt-client.js';
import { pathToSegments, segmentsToPath, listFolder, resolveFolder, resolveResource } from './path-resolver.js';
@@ -22,6 +24,47 @@ if (!token) {
process.exit(1);
}
const LOG_DIR = path.join(process.cwd(), 'logs');

/**
 * Serializes a single log argument to a string.
 * Error instances keep their stack (or message) — plain JSON.stringify
 * would collapse them to "{}", which made `logError('unhandledRejection',
 * reason)` log nothing useful. Objects that cannot be stringified
 * (circular references) fall back to String().
 * @param {*} a - any value to log
 * @returns {string}
 */
function serializeLogArg(a) {
  if (a instanceof Error) return a.stack ?? a.message;
  if (a !== null && typeof a === 'object') {
    try {
      return JSON.stringify(a);
    } catch (_) {
      return String(a); // e.g. circular structure
    }
  }
  return String(a);
}

/**
 * Appends one timestamped line to a file in LOG_DIR, creating the
 * directory on demand. Throws on I/O failure — callers decide how to
 * handle that.
 * @param {string} fileName - log file name inside LOG_DIR
 * @param {Array<*>} args - values to serialize and join with spaces
 */
function writeLogLine(fileName, args) {
  const msg = args.map(serializeLogArg).join(' ') + '\n';
  fs.mkdirSync(LOG_DIR, { recursive: true });
  fs.appendFileSync(path.join(LOG_DIR, fileName), `[${new Date().toISOString()}] ${msg}`);
}

/**
 * Writes to logs/webdav-debug.log (separate file, no conflict with
 * stdout -> webdav.log). Best effort: logging failures are deliberately
 * swallowed so debug logging can never break request handling.
 * @param {...*} args - values to log
 */
function logToFile(...args) {
  try {
    writeLogLine('webdav-debug.log', args);
  } catch (_) {}
}

/**
 * Writes errors to logs/webdav-errors.log (separate file, no conflict
 * with stdout). If even that fails, the failure is reported on stderr.
 * @param {...*} args - values to log
 */
function logError(...args) {
  try {
    writeLogLine('webdav-errors.log', args);
  } catch (e) {
    console.error('logError failed:', e.message);
  }
}
// Last-resort handlers: record async/sync failures that escaped every
// try/catch, so crashes leave a trace in logs/webdav-errors.log.
process.on('unhandledRejection', (reason, promise) => {
  logError('unhandledRejection', reason);
});
// NOTE(review): the process keeps running after an uncaughtException —
// presumably intentional to keep the WebDAV server alive during backups,
// but state may be inconsistent afterwards; consider exiting after logging.
process.on('uncaughtException', (err) => {
  logError('uncaughtException', err.message, err.stack);
});
// Create the error log file at startup — confirms this running process is
// the new version (the one with error logging), not a stale old server.
logError('Server gestartet (Version mit Fehler-Logging)');
/**
 * Cache for freshly created files — rclone verifies via GET/PROPFIND right
 * after a PUT, but the Internxt API can lag behind, so resolveResource may
 * not see the file yet. Entries expire after CACHE_TTL_MS.
 */
const recentFileCache = new Map();
const CACHE_TTL_MS = 60_000;
// One expiry timer per path. Re-caching a path must cancel the old timer,
// otherwise the stale timer would delete the fresh entry prematurely.
const recentFileTimers = new Map();

/**
 * Stores a resolved resource under its WebDAV path for CACHE_TTL_MS.
 * @param {string} pathKey - normalized WebDAV path (cache key)
 * @param {object} resource - resolved file resource (type/bucket/fileId/…)
 */
function cacheRecentFile(pathKey, resource) {
  clearTimeout(recentFileTimers.get(pathKey)); // no-op when undefined
  recentFileCache.set(pathKey, resource);
  const timer = setTimeout(() => {
    recentFileCache.delete(pathKey);
    recentFileTimers.delete(pathKey);
  }, CACHE_TTL_MS);
  timer.unref?.(); // don't keep the process alive just for cache expiry
  recentFileTimers.set(pathKey, timer);
}
const app = express();
// WebDAV-Credentials: Wenn gesetzt, werden Client-Credentials dagegen geprüft.
@@ -67,6 +110,21 @@ function basicAuth(req, res, next) {
app.use(basicAuth);
// Request/response logging for restic traffic only (URLs containing
// 'restic'), so the debug log stays small. Wraps res.send to capture the
// final status code once the handler responds.
// NOTE(review): responses emitted via res.end()/streams bypass this hook —
// only res.send() calls get logged; confirm that covers all error paths.
app.use((req, res, next) => {
  if (req.url && req.url.includes('restic')) {
    logToFile('REQ', req.method, req.url);
  }
  const origSend = res.send;
  res.send = function (...args) {
    if (req.url && req.url.includes('restic')) {
      logToFile('RES', req.method, req.url, 'status:', res.statusCode);
      // 4xx/5xx additionally go to the error log for quick triage
      if (res.statusCode >= 400) logError('HTTP', res.statusCode, req.method, req.url);
    }
    return origSend.apply(this, args);
  };
  next();
});
// Request-Body: PUT/POST als Raw (Datei-Upload), PROPFIND als Text
app.use(express.raw({ type: (req) => req.method === 'PUT' || req.method === 'POST', limit: '1gb' }));
app.use(express.text({ type: 'application/xml', limit: '1kb' }));
@@ -170,7 +228,7 @@ async function handlePropfind(req, res) {
const baseUrl = `${req.protocol}://${req.get('host')}`;
try {
const { users, storage } = createClients(token);
const { storage } = createClients(token);
const refresh = await refreshUser(token);
const user = refresh.user;
const rootUuid = user?.rootFolderUuid || user?.rootFolderId || user?.root_folder_id;
@@ -179,6 +237,25 @@ async function handlePropfind(req, res) {
return;
}
// PROPFIND auf Datei (z.B. rclone-Verifizierung nach PUT)
let resource = await resolveResource(storage, rootUuid, path);
if (!resource) resource = recentFileCache.get(path);
if (resource && resource.type === 'file') {
const segments = pathToSegments(path);
const fileName = segments[segments.length - 1] || 'file';
const items = [{
path,
name: resource.name || fileName,
isCollection: false,
updatedAt: new Date().toISOString(),
size: resource.size ?? 0,
}];
const xml = buildPropfindResponse(baseUrl, items).replace(/\0/g, '');
res.set('Content-Type', 'application/xml; charset="utf-8"');
res.status(207).send(xml);
return;
}
const listing = await listFolder(storage, rootUuid, path);
if (!listing) {
res.status(404).send('Nicht gefunden');
@@ -335,6 +412,10 @@ async function handleMkcol(req, res) {
await createPromise;
res.status(201).send();
} catch (err) {
if (err?.message?.toLowerCase().includes('already exists')) {
res.status(201).send();
return;
}
console.error('MKCOL Fehler:', err.message);
if (err.message?.includes('Token') || err.response?.status === 401) {
res.status(401).send('Nicht autorisiert Token erneuern: https://drive.internxt.com');
@@ -473,12 +554,10 @@ async function handleMove(req, res) {
* GET Handler Datei herunterladen
*/
async function handleGet(req, res) {
let path = req.url || '/';
try {
path = decodeURIComponent(path);
} catch (_) {}
let path = getPathFromRequest(req);
if (!path.startsWith('/')) path = '/' + path;
if (path.endsWith('/')) path = path.slice(0, -1);
path = sanitizeForPath(path);
if (path === '/') {
res.status(405).send('Verzeichnis kann nicht heruntergeladen werden');
return;
@@ -491,7 +570,11 @@ async function handleGet(req, res) {
try {
const { storage, rootUuid } = await getContext();
const resource = await resolveResource(storage, rootUuid, path);
let resource = await resolveResource(storage, rootUuid, path);
if (!resource) {
resource = recentFileCache.get(path);
if (resource) logToFile('GET cache hit', path);
}
if (!resource) {
res.status(404).send('Nicht gefunden');
return;
@@ -543,12 +626,10 @@ async function handleGet(req, res) {
* HEAD Handler wie GET, aber nur Header
*/
async function handleHead(req, res) {
let path = req.url || '/';
try {
path = decodeURIComponent(path);
} catch (_) {}
let path = getPathFromRequest(req);
if (!path.startsWith('/')) path = '/' + path;
if (path.endsWith('/')) path = path.slice(0, -1);
path = sanitizeForPath(path);
if (path === '/') {
res.status(405).send();
return;
@@ -556,7 +637,8 @@ async function handleHead(req, res) {
try {
const { storage, rootUuid } = await getContext();
const resource = await resolveResource(storage, rootUuid, path);
let resource = await resolveResource(storage, rootUuid, path);
if (!resource) resource = recentFileCache.get(path);
if (!resource) {
res.status(404).send();
return;
@@ -593,6 +675,10 @@ async function handlePut(req, res) {
if (path.endsWith('/')) path = path.slice(0, -1);
path = sanitizeForPath(path);
if (process.env.DEBUG) {
console.log('PUT', path, 'Content-Length:', req.headers['content-length'], 'Body:', req.body?.length ?? 0);
}
if (path === '/') {
res.status(403).send('Root kann nicht überschrieben werden');
return;
@@ -610,7 +696,9 @@ async function handlePut(req, res) {
}
try {
logToFile('PUT try start', path);
const { storage, rootUuid } = await getContext();
logToFile('PUT getContext OK', path);
const segments = pathToSegments(path);
const parentPath = segmentsToPath(segments.slice(0, -1));
const fileName = segments[segments.length - 1];
@@ -649,15 +737,24 @@ async function handlePut(req, res) {
}
const { plainName, type } = parseFileName(fileName);
const fileId = await uploadFileBuffer({
let fileId;
logToFile('PUT Upload start', path);
try {
fileId = await uploadFileBuffer({
bucketId,
bridgeUser,
bridgePass,
mnemonic,
buffer: uploadBuffer,
});
} catch (uploadErr) {
logError('PUT Upload (Bridge) fehlgeschlagen', path, uploadErr.message);
throw uploadErr;
}
logToFile('PUT Upload OK', path);
const date = new Date().toISOString();
logToFile('PUT createFileEntry start', path);
const doCreate = async () => {
await storage.createFileEntryByUuid({
@@ -675,7 +772,11 @@ async function handlePut(req, res) {
try {
await doCreate();
logToFile('PUT createFileEntry OK', path);
const fullName = type ? `${plainName}.${type}` : plainName;
cacheRecentFile(path, { type: 'file', bucket: bucketId, fileId, name: fullName, size: buffer.length });
} catch (createErr) {
logError('PUT createFileEntry fehlgeschlagen', path, createErr.message);
// "File already exists" Datei per Namen löschen und erneut versuchen
if (createErr?.message?.toLowerCase().includes('already exists')) {
const [contentPromise] = storage.getFolderContentByUuid({ folderUuid: parent.uuid });
@@ -700,7 +801,12 @@ async function handlePut(req, res) {
res.status(201).send();
} catch (err) {
console.error('PUT Fehler:', path, err.message);
logError('PUT CATCH', path, err?.message ?? String(err), err?.response?.status, err?.response?.data);
const apiErr = err.response?.data ? JSON.stringify(err.response.data) : '';
const status = err.response?.status;
if (process.env.DEBUG) logError('Stack:', err.stack);
console.error('PUT Fehler:', path, err.message, status ? `HTTP ${status}` : '', apiErr || '');
if (process.env.DEBUG) console.error(err.stack);
if (err.message?.includes('Token') || err.response?.status === 401) {
res.status(401).send('Nicht autorisiert Token erneuern: https://drive.internxt.com');
return;
@@ -738,7 +844,7 @@ app.use((req, res, next) => {
}
if (req.method === 'PUT' || req.method === 'POST') {
handlePut(req, res).catch((err) => {
console.error('PUT unhandled:', err);
logError('PUT unhandled', err?.message, err?.stack);
if (!res.headersSent) res.status(500).send(err.message);
});
return;
@@ -770,5 +876,6 @@ app.use((req, res, next) => {
// Start the HTTP server. The port reminder matters: rclone/restic must be
// configured with exactly this URL, otherwise a stale server on another
// port answers and restic sees 500/"object not found" (see docs).
app.listen(PORT, () => {
  console.log(`Internxt WebDAV Server http://127.0.0.1:${PORT}`);
  console.log('Phase 14: PROPFIND, MKCOL, DELETE, MOVE, GET, PUT aktiv.');
  console.log(`rclone/restic: URL muss http://127.0.0.1:${PORT} sein (gleicher Port!)`);
  console.log('Verwendung: z.B. Windows Explorer → Netzlaufwerk verbinden');
});